diff --git a/.codespellrc b/.codespellrc
index 269fd80..6d932b7 100644
--- a/.codespellrc
+++ b/.codespellrc
@@ -1,5 +1,6 @@
[codespell]
-skip = .git,*.pdf,*.svg
-ignore-regex = ^\s*"image/\S+": ".*
+skip=.git,*.pdf,*.svg,*.ipynb
+ignore-regex=^\s*"image/\S+": ".*
+
#
-# ignore-words-list =
+ignore-words-list=shepard,nevers,nin,DaSy,Fo,Alle,ND
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 3ebbf55..dfea6b3 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -1,4 +1,3 @@
----
name: Codespell
on:
@@ -18,5 +17,6 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
+
- name: Codespell
uses: codespell-project/actions-codespell@v2
diff --git a/completed_tutorials/01-DataJoint Basics.ipynb b/completed_tutorials/01-DataJoint Basics.ipynb
index 25d84bf..5469437 100644
--- a/completed_tutorials/01-DataJoint Basics.ipynb
+++ b/completed_tutorials/01-DataJoint Basics.ipynb
@@ -249,7 +249,7 @@
}
],
"source": [
- "schema = dj.schema('tutorial')"
+ "schema = dj.schema(\"tutorial\")"
]
},
{
@@ -421,7 +421,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Mouse.insert1((0, '2017-03-01', 'M'))"
+ "Mouse.insert1((0, \"2017-03-01\", \"M\"))"
]
},
{
@@ -533,11 +533,7 @@
"metadata": {},
"outputs": [],
"source": [
- "data = {\n",
- " 'mouse_id': 100,\n",
- " 'dob': '2017-05-12',\n",
- " 'sex': 'F'\n",
- "}"
+ "data = {\"mouse_id\": 100, \"dob\": \"2017-05-12\", \"sex\": \"F\"}"
]
},
{
@@ -661,11 +657,7 @@
"metadata": {},
"outputs": [],
"source": [
- "data = [\n",
- " (1, '2016-11-19', 'M'),\n",
- " (2, '2016-11-20', 'unknown'),\n",
- " (5, '2016-12-25', 'F')\n",
- "]"
+ "data = [(1, \"2016-11-19\", \"M\"), (2, \"2016-11-20\", \"unknown\"), (5, \"2016-12-25\", \"F\")]"
]
},
{
@@ -691,8 +683,8 @@
"outputs": [],
"source": [
"data = [\n",
- " {'mouse_id': 10, 'dob': '2017-01-01', 'sex': 'F'},\n",
- " {'mouse_id': 11, 'dob': '2017-01-03', 'sex': 'F'},\n",
+ " {\"mouse_id\": 10, \"dob\": \"2017-01-01\", \"sex\": \"F\"},\n",
+ " {\"mouse_id\": 11, \"dob\": \"2017-01-03\", \"sex\": \"F\"},\n",
"]\n",
"\n",
"# insert them all\n",
@@ -848,10 +840,12 @@
],
"source": [
"Mouse.insert1(\n",
- "{'mouse_id': 0,\n",
- " 'dob': '2018-01-01',\n",
- " 'sex': 'M',\n",
- "})"
+ " {\n",
+ " \"mouse_id\": 0,\n",
+ " \"dob\": \"2018-01-01\",\n",
+ " \"sex\": \"M\",\n",
+ " }\n",
+ ")"
]
},
{
@@ -868,11 +862,11 @@
"outputs": [],
"source": [
"data = [\n",
- " {'mouse_id': 12, 'dob': '2017-03-21', 'sex': 'F'},\n",
- " {'mouse_id': 18, 'dob': '2017-05-01', 'sex': 'F'},\n",
- " {'mouse_id': 19, 'dob': '2018-07-21', 'sex': 'M'},\n",
- " {'mouse_id': 22, 'dob': '2019-12-15', 'sex': 'F'},\n",
- " {'mouse_id': 34, 'dob': '2018-09-22', 'sex': 'M'}\n",
+ " {\"mouse_id\": 12, \"dob\": \"2017-03-21\", \"sex\": \"F\"},\n",
+ " {\"mouse_id\": 18, \"dob\": \"2017-05-01\", \"sex\": \"F\"},\n",
+ " {\"mouse_id\": 19, \"dob\": \"2018-07-21\", \"sex\": \"M\"},\n",
+ " {\"mouse_id\": 22, \"dob\": \"2019-12-15\", \"sex\": \"F\"},\n",
+ " {\"mouse_id\": 34, \"dob\": \"2018-09-22\", \"sex\": \"M\"},\n",
"]\n",
"\n",
"# insert them all\n",
@@ -1014,7 +1008,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE - Insert more mice\n"
+ "# ENTER YOUR CODE - Insert more mice"
]
},
{
@@ -1135,10 +1129,10 @@
"outputs": [],
"source": [
"data = {\n",
- " 'mouse_id': 0,\n",
- " 'session_date': '2017-05-15',\n",
- " 'experiment_setup': 0,\n",
- " 'experimenter': 'Edgar Y. Walker'\n",
+ " \"mouse_id\": 0,\n",
+ " \"session_date\": \"2017-05-15\",\n",
+ " \"experiment_setup\": 0,\n",
+ " \"experimenter\": \"Edgar Y. Walker\",\n",
"}\n",
"\n",
"Session.insert1(data)"
@@ -1362,10 +1356,10 @@
],
"source": [
"data = {\n",
- " 'mouse_id': 0,\n",
- " 'session_date': '2018-01-15',\n",
- " 'experiment_setup': 100,\n",
- " 'experimenter': 'Jacob Reimer'\n",
+ " \"mouse_id\": 0,\n",
+ " \"session_date\": \"2018-01-15\",\n",
+ " \"experiment_setup\": 100,\n",
+ " \"experimenter\": \"Jacob Reimer\",\n",
"}\n",
"\n",
"Session.insert1(data)\n",
@@ -1387,10 +1381,10 @@
"outputs": [],
"source": [
"data = {\n",
- " 'mouse_id': 18,\n",
- " 'session_date': '2018-01-15',\n",
- " 'experiment_setup': 101,\n",
- " 'experimenter': 'Jacob Reimer'\n",
+ " \"mouse_id\": 18,\n",
+ " \"session_date\": \"2018-01-15\",\n",
+ " \"experiment_setup\": 101,\n",
+ " \"experimenter\": \"Jacob Reimer\",\n",
"}\n",
"\n",
"# insert them all\n",
@@ -1525,10 +1519,10 @@
"outputs": [],
"source": [
"bad_data = {\n",
- " 'mouse_id': 9999, # this mouse doesn't exist!\n",
- " 'session_date': '2017-05-15',\n",
- " 'experiment_setup': 0,\n",
- " 'experimenter': 'Edgar Y. Walker'\n",
+ " \"mouse_id\": 9999, # this mouse doesn't exist!\n",
+ " \"session_date\": \"2017-05-15\",\n",
+ " \"experiment_setup\": 0,\n",
+ " \"experimenter\": \"Edgar Y. Walker\",\n",
"}"
]
},
@@ -1709,7 +1703,7 @@
}
],
"source": [
- "Mouse & 'mouse_id = 0'"
+ "Mouse & \"mouse_id = 0\""
]
},
{
@@ -2886,7 +2880,7 @@
}
],
"source": [
- "female_mice & 'mouse_id > 10'"
+ "female_mice & \"mouse_id > 10\""
]
},
{
@@ -3006,7 +3000,7 @@
}
],
"source": [
- "Mouse & Session "
+ "Mouse & Session"
]
},
{
@@ -3999,7 +3993,7 @@
}
],
"source": [
- "Mouse.proj('sex')"
+ "Mouse.proj(\"sex\")"
]
},
{
@@ -4129,7 +4123,7 @@
}
],
"source": [
- "Mouse.proj(date_of_birth='dob')"
+ "Mouse.proj(date_of_birth=\"dob\")"
]
},
{
@@ -4239,7 +4233,7 @@
}
],
"source": [
- "(Mouse * Session).proj(age='datediff(session_date, dob)')"
+ "(Mouse * Session).proj(age=\"datediff(session_date, dob)\")"
]
},
{
@@ -4379,7 +4373,7 @@
}
],
"source": [
- "(Mouse * Session).proj(..., age='datediff(session_date, dob)')"
+ "(Mouse * Session).proj(..., age=\"datediff(session_date, dob)\")"
]
},
{
@@ -4690,7 +4684,7 @@
}
],
"source": [
- "(Mouse & 'sex = \"M\"').fetch(format='frame')"
+ "(Mouse & 'sex = \"M\"').fetch(format=\"frame\")"
]
},
{
@@ -4717,7 +4711,7 @@
}
],
"source": [
- "(Mouse & 'sex = \"M\"').fetch('KEY')"
+ "(Mouse & 'sex = \"M\"').fetch(\"KEY\")"
]
},
{
@@ -4733,7 +4727,7 @@
"metadata": {},
"outputs": [],
"source": [
- "sex, dob = Mouse.fetch('sex', 'dob')"
+ "sex, dob = Mouse.fetch(\"sex\", \"dob\")"
]
},
{
@@ -4818,7 +4812,7 @@
}
],
"source": [
- "info = Mouse.fetch('sex', 'dob', as_dict=True)\n",
+ "info = Mouse.fetch(\"sex\", \"dob\", as_dict=True)\n",
"info"
]
},
@@ -4853,7 +4847,9 @@
}
],
"source": [
- "mouse_0 = (Mouse & {'mouse_id': 0}).fetch1() # \"fetch1()\" because we know there's only one\n",
+ "mouse_0 = (\n",
+ " Mouse & {\"mouse_id\": 0}\n",
+ ").fetch1() # \"fetch1()\" because we know there's only one\n",
"mouse_0"
]
},
@@ -4881,7 +4877,7 @@
}
],
"source": [
- "(Mouse & {'mouse_id': 0}).fetch1('KEY')"
+ "(Mouse & {\"mouse_id\": 0}).fetch1(\"KEY\")"
]
},
{
@@ -4897,7 +4893,7 @@
"metadata": {},
"outputs": [],
"source": [
- "sex, dob = (Mouse & {'mouse_id': 0}).fetch1('sex', 'dob')"
+ "sex, dob = (Mouse & {\"mouse_id\": 0}).fetch1(\"sex\", \"dob\")"
]
},
{
@@ -4978,7 +4974,7 @@
}
],
"source": [
- "(Mouse & 'mouse_id = 100').delete()"
+ "(Mouse & \"mouse_id = 100\").delete()"
]
},
{
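The cells reformatted above exercise DataJoint's core workflow: declare a schema, insert, restrict with `&`, project with `.proj()`, and retrieve with `fetch()`/`fetch1()`. A condensed sketch of that workflow, assuming the `Mouse` and `Session` tables defined in this notebook and a reachable database:

```python
import datajoint as dj

schema = dj.schema("tutorial")  # connects using the credentials in dj.config

# restriction (&) narrows rows; a string condition or a dict both work
males = Mouse & 'sex = "M"'
mouse_0 = Mouse & {"mouse_id": 0}

# projection narrows, renames, or computes attributes
ages = (Mouse * Session).proj(..., age="datediff(session_date, dob)")

# fetch() returns every matching row; fetch1() requires exactly one match
keys = males.fetch("KEY")                                  # primary keys as dicts
sex, dob = (Mouse & {"mouse_id": 0}).fetch1("sex", "dob")  # one row's attributes
```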
diff --git a/completed_tutorials/02-Calcium Imaging Imported Tables.ipynb b/completed_tutorials/02-Calcium Imaging Imported Tables.ipynb
index dbb2526..becf184 100644
--- a/completed_tutorials/02-Calcium Imaging Imported Tables.ipynb
+++ b/completed_tutorials/02-Calcium Imaging Imported Tables.ipynb
@@ -50,6 +50,7 @@
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
+ "\n",
"%matplotlib inline"
]
},
@@ -433,12 +434,30 @@
"metadata": {},
"outputs": [],
"source": [
- "Scan.insert([\n",
- " {'mouse_id': 0, 'session_date': '2017-05-15', 'scan_idx': 1, \n",
- " 'depth': 150, 'wavelength': 920, 'laser_power': 26, 'fps': 15, 'file_name': 'example_scan_01.tif'},\n",
- " {'mouse_id': 0, 'session_date': '2017-05-15', 'scan_idx': 2, \n",
- " 'depth': 200, 'wavelength': 920, 'laser_power': 24, 'fps': 15, 'file_name': 'example_scan_02.tif'},\n",
- "])"
+ "Scan.insert(\n",
+ " [\n",
+ " {\n",
+ " \"mouse_id\": 0,\n",
+ " \"session_date\": \"2017-05-15\",\n",
+ " \"scan_idx\": 1,\n",
+ " \"depth\": 150,\n",
+ " \"wavelength\": 920,\n",
+ " \"laser_power\": 26,\n",
+ " \"fps\": 15,\n",
+ " \"file_name\": \"example_scan_01.tif\",\n",
+ " },\n",
+ " {\n",
+ " \"mouse_id\": 0,\n",
+ " \"session_date\": \"2017-05-15\",\n",
+ " \"scan_idx\": 2,\n",
+ " \"depth\": 200,\n",
+ " \"wavelength\": 920,\n",
+ " \"laser_power\": 24,\n",
+ " \"fps\": 15,\n",
+ " \"file_name\": \"example_scan_02.tif\",\n",
+ " },\n",
+ " ]\n",
+ ")"
]
},
{
@@ -594,7 +613,8 @@
],
"source": [
"from skimage import io\n",
- "im = io.imread('../data/example_scan_01.tif')\n",
+ "\n",
+ "im = io.imread(\"../data/example_scan_01.tif\")\n",
"print(im.shape)"
]
},
@@ -832,8 +852,11 @@
" ---\n",
" average_frame : longblob # average fluorescence across frames\n",
" \"\"\"\n",
- " def make(self, key): # key is the primary key of one of the entries in the table `Scan`\n",
- " print('key is', key)"
+ "\n",
+ " def make(\n",
+ " self, key\n",
+ " ): # key is the primary key of one of the entries in the table `Scan`\n",
+ " print(\"key is\", key)"
]
},
{
@@ -1036,27 +1059,30 @@
" ---\n",
" average_frame : longblob # average fluorescence across frames\n",
" \"\"\"\n",
- " def make(self, key): # key is the primary key of one of the entries in the table `Scan`\n",
+ "\n",
+ " def make(\n",
+ " self, key\n",
+ " ): # key is the primary key of one of the entries in the table `Scan`\n",
" import os\n",
- " \n",
+ "\n",
" # fetch data directory from table Session\n",
- " data_path = (Session & key).fetch1('data_path')\n",
- " \n",
+ " data_path = (Session & key).fetch1(\"data_path\")\n",
+ "\n",
" # fetch data file name from table Scan\n",
- " file_name = (Scan & key).fetch1('file_name')\n",
- " \n",
+ " file_name = (Scan & key).fetch1(\"file_name\")\n",
+ "\n",
" # load the file\n",
" im = io.imread(os.path.join(data_path, file_name))\n",
" # compute the average image across the frames\n",
" avg_image = np.mean(im, axis=0)\n",
- " \n",
+ "\n",
" # Now prepare the entry as a dictionary with all fields defined in the table.\n",
- " key['average_frame'] = avg_image # inherit the primary key from the table Scan\n",
- " \n",
+ " key[\"average_frame\"] = avg_image # inherit the primary key from the table Scan\n",
+ "\n",
" # insert entry with the method `insert1()`\n",
" self.insert1(key)\n",
- " \n",
- " print('\\tPopulated Scan {mouse_id} - {session_date} - {scan_idx}'.format(**key))"
+ "\n",
+ " print(\"\\tPopulated Scan {mouse_id} - {session_date} - {scan_idx}\".format(**key))"
]
},
{
@@ -1238,16 +1264,18 @@
"metadata": {},
"outputs": [],
"source": [
- "Scan.insert1({\n",
- " 'mouse_id': 100,\n",
- " 'session_date': '2017-05-25',\n",
- " 'scan_idx': 1,\n",
- " 'depth': 150,\n",
- " 'wavelength': 920,\n",
- " 'laser_power': 25,\n",
- " 'fps': 15,\n",
- " 'file_name': 'example_scan_03.tif'\n",
- "})"
+ "Scan.insert1(\n",
+ " {\n",
+ " \"mouse_id\": 100,\n",
+ " \"session_date\": \"2017-05-25\",\n",
+ " \"scan_idx\": 1,\n",
+ " \"depth\": 150,\n",
+ " \"wavelength\": 920,\n",
+ " \"laser_power\": 25,\n",
+ " \"fps\": 15,\n",
+ " \"file_name\": \"example_scan_03.tif\",\n",
+ " }\n",
+ ")"
]
},
{
@@ -1542,8 +1570,8 @@
}
],
"source": [
- "key = dict(mouse_id=0, session_date='2017-05-15', scan_idx=1)\n",
- "avg_image = (AverageFrame & key).fetch1('average_frame')\n",
+ "key = dict(mouse_id=0, session_date=\"2017-05-15\", scan_idx=1)\n",
+ "avg_image = (AverageFrame & key).fetch1(\"average_frame\")\n",
"plt.imshow(avg_image, cmap=plt.cm.gray)"
]
},
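The `AverageFrame.make()` hunks above follow the standard imported-table recipe: fetch the upstream attributes restricted by `key`, compute, then `insert1()`. A minimal sketch of that recipe, assuming the `schema`, `Session`, and `Scan` objects from this notebook; calling `populate()` afterwards invokes `make()` once per missing key:

```python
import os

import numpy as np
import datajoint as dj
from skimage import io


@schema
class AverageFrame(dj.Imported):
    definition = """
    -> Scan
    ---
    average_frame : longblob  # average fluorescence across frames
    """

    def make(self, key):
        # key is the primary key of one Scan entry not yet in this table
        data_path = (Session & key).fetch1("data_path")
        file_name = (Scan & key).fetch1("file_name")
        im = io.imread(os.path.join(data_path, file_name))  # frame axis first
        key["average_frame"] = np.mean(im, axis=0)  # collapse across frames
        self.insert1(key)


AverageFrame.populate()  # runs make() for every Scan entry still missing here
```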
diff --git a/completed_tutorials/03-Calcium Imaging Computed Tables.ipynb b/completed_tutorials/03-Calcium Imaging Computed Tables.ipynb
index 0fd34f6..6591c3c 100644
--- a/completed_tutorials/03-Calcium Imaging Computed Tables.ipynb
+++ b/completed_tutorials/03-Calcium Imaging Computed Tables.ipynb
@@ -53,6 +53,7 @@
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
+ "\n",
"%matplotlib inline"
]
},
@@ -496,7 +497,7 @@
"metadata": {},
"outputs": [],
"source": [
- "keys = AverageFrame.fetch('KEY')\n",
+ "keys = AverageFrame.fetch(\"KEY\")\n",
"\n",
"# pick one key\n",
"key = keys[0]"
@@ -609,7 +610,7 @@
"metadata": {},
"outputs": [],
"source": [
- "avg_image = AverageFrame.fetch('average_frame')"
+ "avg_image = AverageFrame.fetch(\"average_frame\")"
]
},
{
@@ -698,7 +699,7 @@
"metadata": {},
"outputs": [],
"source": [
- "avg_image = (AverageFrame & key).fetch1('average_frame')"
+ "avg_image = (AverageFrame & key).fetch1(\"average_frame\")"
]
},
{
@@ -911,7 +912,7 @@
"source": [
"label_im, nb_labels = ndimage.label(mask)\n",
"print(nb_labels)\n",
- "plt.imshow(label_im) "
+ "plt.imshow(label_im)"
]
},
{
@@ -971,7 +972,7 @@
],
"source": [
"size_cutoff = 50\n",
- "sizes = np.array([np.sum(label_im==i) for i in np.unique(label_im)])\n",
+ "sizes = np.array([np.sum(label_im == i) for i in np.unique(label_im)])\n",
"\n",
"small_size_filter = sizes < size_cutoff\n",
"pixel_to_remove = small_size_filter[label_im]\n",
@@ -1001,7 +1002,7 @@
"outputs": [],
"source": [
"rois = []\n",
- "for i in np.unique(label_im)[1:]: # 0 is the background\n",
+ "for i in np.unique(label_im)[1:]: # 0 is the background\n",
" rois.append(label_im == i)"
]
},
@@ -1211,6 +1212,7 @@
" ---\n",
" segmented_masks : longblob # overview of segmented masks\n",
" \"\"\"\n",
+ "\n",
" class Roi(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -1402,6 +1404,7 @@
" ---\n",
" segmented_masks : longblob # overview of segmented masks\n",
" \"\"\"\n",
+ "\n",
" class Roi(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -1409,40 +1412,43 @@
" ---\n",
" mask : longblob # mask of this roi\n",
" \"\"\"\n",
- " \n",
- " def make(self, key): # key is one of the primary keys of the join product of AverageFrame and ParameterSet\n",
- " \n",
- " print('Populating for: ', key)\n",
- " \n",
+ "\n",
+ " def make(\n",
+ " self, key\n",
+ " ): # key is one of the primary keys of the join product of AverageFrame and ParameterSet\n",
+ " print(\"Populating for: \", key)\n",
+ "\n",
" # fetch average image from the previous table AverageFrame\n",
- " avg_image = (AverageFrame & key).fetch1('average_frame')\n",
- " \n",
+ " avg_image = (AverageFrame & key).fetch1(\"average_frame\")\n",
+ "\n",
" # fetch the parameters threshold and size_cutoff\n",
" threshold, size_cutoff = (SegmentationParam & key).fetch1(\n",
- " 'threshold', 'size_cutoff')\n",
- " \n",
+ " \"threshold\", \"size_cutoff\"\n",
+ " )\n",
+ "\n",
" # perform the thresholding and blob detection\n",
" mask = avg_image > threshold\n",
" label_im, nb_labels = ndimage.label(mask)\n",
- " sizes = np.array([np.sum(label_im==i) for i in np.unique(label_im)])\n",
+ " sizes = np.array([np.sum(label_im == i) for i in np.unique(label_im)])\n",
"\n",
" small_size_filter = sizes < size_cutoff\n",
" pixel_to_remove = small_size_filter[label_im]\n",
"\n",
" label_im[pixel_to_remove] = 0\n",
- " \n",
+ "\n",
" rois = []\n",
- " for i in np.unique(label_im)[1:]: # 0 is the background\n",
+ " for i in np.unique(label_im)[1:]: # 0 is the background\n",
" rois.append(\n",
- " dict(**key, # inherit primary key from master table\n",
- " roi_idx=i, \n",
- " mask=label_im==i))\n",
- " \n",
+ " dict(\n",
+ " **key, # inherit primary key from master table\n",
+ " roi_idx=i,\n",
+ " mask=label_im == i\n",
+ " )\n",
+ " )\n",
+ "\n",
" # insert into the master table first\n",
- " self.insert1(\n",
- " dict(**key, segmented_masks=label_im)\n",
- " )\n",
- " print('Detected {} ROIs!\\n'.format(len(rois)))\n",
+ " self.insert1(dict(**key, segmented_masks=label_im))\n",
+ " print(\"Detected {} ROIs!\\n\".format(len(rois)))\n",
" # then insert into the part table\n",
" self.Roi.insert(rois)"
]
@@ -2395,7 +2401,7 @@
],
"source": [
"# Select 'No' when it pops up\n",
- "(Segmentation & 'seg_param_id = 0').delete()"
+ "(Segmentation & \"seg_param_id = 0\").delete()"
]
},
{
@@ -2498,7 +2504,7 @@
}
],
"source": [
- "SegmentationParam & 'seg_param_id = 0'"
+ "SegmentationParam & \"seg_param_id = 0\""
]
},
{
@@ -2520,7 +2526,7 @@
}
],
"source": [
- "(SegmentationParam() & 'seg_param_id = 0').delete()"
+ "(SegmentationParam() & \"seg_param_id = 0\").delete()"
]
},
{
@@ -2681,7 +2687,7 @@
],
"source": [
"# show one example ROI\n",
- "masks = (Segmentation.Roi).fetch('mask')\n",
+ "masks = (Segmentation.Roi).fetch(\"mask\")\n",
"plt.imshow(masks[2])"
]
},
@@ -2709,14 +2715,18 @@
"source": [
"from skimage import io\n",
"import os\n",
+ "\n",
+ "\n",
"@schema\n",
- "class Fluorescence(dj.Imported): # imported table because it also rely on the external tiff file.\n",
+ "class Fluorescence(\n",
+ " dj.Imported\n",
+ "): # imported table because it also rely on the external tiff file.\n",
" definition = \"\"\"\n",
" -> Segmentation\n",
" ---\n",
" time : longblob # time for each frame\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Trace(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -2724,43 +2734,40 @@
" ---\n",
" trace : longblob # fluorescence trace of each ROI\n",
" \"\"\"\n",
- " \n",
+ "\n",
" # the master table is mainly to perform the computation, while the part table contains the result\n",
" def make(self, key):\n",
- " \n",
- " print('Populating: {}'.format(key))\n",
+ " print(\"Populating: {}\".format(key))\n",
" # fetch data directory from table Session\n",
- " data_path = '../01-Calcium_Imaging/' + (Session & key).fetch1('data_path')\n",
- " \n",
+ " data_path = \"../01-Calcium_Imaging/\" + (Session & key).fetch1(\"data_path\")\n",
+ "\n",
" # fetch data file name from table Scan\n",
- " file_name = (Scan & key).fetch1('file_name')\n",
- " \n",
+ " file_name = (Scan & key).fetch1(\"file_name\")\n",
+ "\n",
" # load the file\n",
" im = io.imread(os.path.join(data_path, file_name))\n",
- " \n",
+ "\n",
" # get dimensions of the image and reshape\n",
" n, w, h = np.shape(im)\n",
- " im_reshaped = np.reshape(im, [n, w*h])\n",
- " \n",
+ " im_reshaped = np.reshape(im, [n, w * h])\n",
+ "\n",
" # get frames per second to compute time\n",
- " fps = (Scan & key).fetch1('fps')\n",
- " \n",
+ " fps = (Scan & key).fetch1(\"fps\")\n",
+ "\n",
" # insert into master table first\n",
- " self.insert1(dict(**key, time=np.array(range(n))/fps))\n",
- " \n",
- " \n",
+ " self.insert1(dict(**key, time=np.array(range(n)) / fps))\n",
+ "\n",
" # extract traces\n",
- " roi_keys, masks = (Segmentation.Roi & key).fetch('KEY', 'mask')\n",
- " \n",
+ " roi_keys, masks = (Segmentation.Roi & key).fetch(\"KEY\", \"mask\")\n",
+ "\n",
" traces = []\n",
" for roi_key, mask in zip(roi_keys, masks):\n",
- " \n",
" # reshape mask\n",
- " mask_reshaped = np.reshape(mask, [w*h])\n",
+ " mask_reshaped = np.reshape(mask, [w * h])\n",
" trace = np.mean(im_reshaped[:, mask_reshaped], axis=1)\n",
- " \n",
+ "\n",
" traces.append(dict(**roi_key, trace=trace))\n",
- " \n",
+ "\n",
" self.Trace.insert(traces)"
]
},
@@ -2823,14 +2830,14 @@
"key = dict(mouse_id=0, session_number=1, scan_idx=1, seg_param_id=1)\n",
"\n",
"# ENTER YOUR CODE! - fetch 'time' from the Fluorescence table using fetch1()\n",
- "time = (Fluorescence & key).fetch1('time')\n",
+ "time = (Fluorescence & key).fetch1(\"time\")\n",
"\n",
"# ENTER YOUR CODE! - fetch 'trace' from the Fluorescence.Trace table using fetch()\n",
- "traces = (Fluorescence.Trace & key).fetch('trace')\n",
+ "traces = (Fluorescence.Trace & key).fetch(\"trace\")\n",
"\n",
"plt.plot(time, np.vstack(traces).T)\n",
- "plt.xlabel('Time (s)')\n",
- "plt.ylabel('Fluorescence')"
+ "plt.xlabel(\"Time (s)\")\n",
+ "plt.ylabel(\"Fluorescence\")"
]
},
{
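One detail worth isolating from the `Segmentation` hunks above is the insert order inside `make()`: the master row goes in via `insert1()` before the part rows go in via `self.Roi.insert()`, so parts never exist without their master. A stripped-down sketch, assuming `schema`, `AverageFrame`, and `SegmentationParam` as defined in this notebook:

```python
import numpy as np
import datajoint as dj
from scipy import ndimage


@schema
class Segmentation(dj.Computed):
    definition = """
    -> AverageFrame
    -> SegmentationParam
    ---
    segmented_masks : longblob  # labeled image of detected blobs
    """

    class Roi(dj.Part):
        definition = """
        -> master
        roi_idx : int
        ---
        mask : longblob  # mask of this roi
        """

    def make(self, key):
        avg_image = (AverageFrame & key).fetch1("average_frame")
        threshold, size_cutoff = (SegmentationParam & key).fetch1(
            "threshold", "size_cutoff"
        )
        label_im, _ = ndimage.label(avg_image > threshold)
        sizes = np.array([np.sum(label_im == i) for i in np.unique(label_im)])
        label_im[(sizes < size_cutoff)[label_im]] = 0  # drop undersized blobs
        self.insert1(dict(**key, segmented_masks=label_im))  # master row first
        self.Roi.insert(  # then its parts, inheriting the master's key
            dict(**key, roi_idx=i, mask=label_im == i)
            for i in np.unique(label_im)[1:]  # label 0 is the background
        )
```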
diff --git a/completed_tutorials/04-Electrophysiology Imported Tables.ipynb b/completed_tutorials/04-Electrophysiology Imported Tables.ipynb
index a52528b..38e93af 100644
--- a/completed_tutorials/04-Electrophysiology Imported Tables.ipynb
+++ b/completed_tutorials/04-Electrophysiology Imported Tables.ipynb
@@ -50,6 +50,7 @@
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
+ "\n",
"%matplotlib inline"
]
},
@@ -359,6 +360,7 @@
"outputs": [],
"source": [
"from tutorial_pipeline import data_dir\n",
+ "\n",
"data_dir"
]
},
@@ -403,7 +405,7 @@
}
],
"source": [
- "keys = Session.fetch('KEY')\n",
+ "keys = Session.fetch(\"KEY\")\n",
"keys"
]
},
@@ -564,7 +566,9 @@
}
],
"source": [
- "filename = '{data_dir}/data_{mouse_id}_{session_date}.npy'.format(**key, data_dir=data_dir)\n",
+ "filename = \"{data_dir}/data_{mouse_id}_{session_date}.npy\".format(\n",
+ " **key, data_dir=data_dir\n",
+ ")\n",
"filename"
]
},
@@ -1167,8 +1171,9 @@
" ---\n",
" activity: longblob # electric activity of the neuron\n",
" \"\"\"\n",
- " def make(self, key): # `make` takes a single argument `key`\n",
- " print('key is', key)"
+ "\n",
+ " def make(self, key): # `make` takes a single argument `key`\n",
+ " print(\"key is\", key)"
]
},
{
@@ -1369,25 +1374,32 @@
" ---\n",
" activity: longblob # electric activity of the neuron\n",
" \"\"\"\n",
+ "\n",
" def make(self, key):\n",
" # use key dictionary to determine the data file path\n",
- " data_file = '{data_dir}/data_{mouse_id}_{session_date}.npy'.format(**key, data_dir=data_dir)\n",
+ " data_file = \"{data_dir}/data_{mouse_id}_{session_date}.npy\".format(\n",
+ " **key, data_dir=data_dir\n",
+ " )\n",
"\n",
" # load the data\n",
" data = np.load(data_file)\n",
- " \n",
- " print('Populating neuron(s) for mouse_id={mouse_id} on session_date={session_date}'.format(**key))\n",
+ "\n",
+ " print(\n",
+ " \"Populating neuron(s) for mouse_id={mouse_id} on session_date={session_date}\".format(\n",
+ " **key\n",
+ " )\n",
+ " )\n",
" for idx, d in enumerate(data):\n",
" # add the index of the 1st dimension as neuron_id\n",
- " key['neuron_id'] = idx\n",
- " \n",
+ " key[\"neuron_id\"] = idx\n",
+ "\n",
" # add the loaded data as the \"activity\" column\n",
- " key['activity'] = d\n",
+ " key[\"activity\"] = d\n",
"\n",
" # insert the key into self\n",
" self.insert1(key)\n",
"\n",
- " print('\\tPopulated neuron {neuron_id}'.format(**key))"
+ " print(\"\\tPopulated neuron {neuron_id}\".format(**key))"
]
},
{
@@ -1583,12 +1595,14 @@
"metadata": {},
"outputs": [],
"source": [
- "Session.insert1({\n",
- " \"mouse_id\": 100,\n",
- " \"session_date\": \"2017-06-01\",\n",
- " \"experiment_setup\": 1,\n",
- " \"experimenter\": \"Jacob Reimer\"\n",
- "})"
+ "Session.insert1(\n",
+ " {\n",
+ " \"mouse_id\": 100,\n",
+ " \"session_date\": \"2017-06-01\",\n",
+ " \"experiment_setup\": 1,\n",
+ " \"experimenter\": \"Jacob Reimer\",\n",
+ " }\n",
+ ")"
]
},
{
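The `Neuron.make()` hunk above inserts several rows per call by reusing `key`: each loop iteration adds `neuron_id` and `activity` before `insert1()`. A condensed sketch, assuming `schema`, `data_dir`, and the `Session` table imported from `tutorial_pipeline` as in this notebook:

```python
import numpy as np
import datajoint as dj


@schema
class Neuron(dj.Imported):
    definition = """
    -> Session
    neuron_id: int
    ---
    activity: longblob  # electric activity of the neuron
    """

    def make(self, key):
        # one .npy file per session; its first axis indexes neurons
        data_file = "{data_dir}/data_{mouse_id}_{session_date}.npy".format(
            **key, data_dir=data_dir
        )
        data = np.load(data_file)
        for idx, d in enumerate(data):
            key["neuron_id"] = idx  # extend the session key for each neuron
            key["activity"] = d
            self.insert1(key)
```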
diff --git a/completed_tutorials/05-Electrophysiology Computed Tables.ipynb b/completed_tutorials/05-Electrophysiology Computed Tables.ipynb
index 7fa91ef..f749ecc 100644
--- a/completed_tutorials/05-Electrophysiology Computed Tables.ipynb
+++ b/completed_tutorials/05-Electrophysiology Computed Tables.ipynb
@@ -53,6 +53,7 @@
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
+ "\n",
"%matplotlib inline"
]
},
@@ -90,7 +91,9 @@
],
"source": [
"import os\n",
- "if os.path.basename(os.getcwd())!='notebooks': os.chdir('/home/notebooks')\n",
+ "\n",
+ "if os.path.basename(os.getcwd()) != \"notebooks\":\n",
+ " os.chdir(\"/home/notebooks\")\n",
"from tutorial_pipeline.ephys_cell_activity import schema, Mouse, Session, Neuron"
]
},
@@ -393,7 +396,7 @@
"metadata": {},
"outputs": [],
"source": [
- "keys = Neuron().fetch('KEY')\n",
+ "keys = Neuron().fetch(\"KEY\")\n",
"\n",
"# pick one key\n",
"key = keys[0]"
@@ -512,7 +515,7 @@
"metadata": {},
"outputs": [],
"source": [
- "activity = (Neuron() & key).fetch('activity')"
+ "activity = (Neuron() & key).fetch(\"activity\")"
]
},
{
@@ -1074,7 +1077,7 @@
"metadata": {},
"outputs": [],
"source": [
- "activity = (Neuron() & key).fetch1('activity')"
+ "activity = (Neuron() & key).fetch1(\"activity\")"
]
},
{
@@ -1509,6 +1512,7 @@
"source": [
"# ENTER YOUR CODE! - complete the `make` method\n",
"\n",
+ "\n",
"@schema\n",
"class ActivityStatistics(dj.Computed):\n",
" definition = \"\"\"\n",
@@ -1518,16 +1522,20 @@
" stdev: float # standard deviation of activity\n",
" max: float # maximum activity\n",
" \"\"\"\n",
- " \n",
+ "\n",
" def make(self, key):\n",
- " activity = (Neuron() & key).fetch1('activity') # fetch activity as NumPy array\n",
+ " activity = (Neuron() & key).fetch1(\"activity\") # fetch activity as NumPy array\n",
"\n",
" # compute various statistics on activity\n",
- " key['mean'] = activity.mean() # compute mean\n",
- " key['stdev'] = activity.std() # compute standard deviation\n",
- " key['max'] = activity.max() # compute max\n",
+ " key[\"mean\"] = activity.mean() # compute mean\n",
+ " key[\"stdev\"] = activity.std() # compute standard deviation\n",
+ " key[\"max\"] = activity.max() # compute max\n",
" self.insert1(key)\n",
- " print('Computed statistics for neuron_id {neuron_id} mouse_id {mouse_id} session_date {session_date}'.format(**key))"
+ " print(\n",
+ " \"Computed statistics for neuron_id {neuron_id} mouse_id {mouse_id} session_date {session_date}\".format(\n",
+ " **key\n",
+ " )\n",
+ " )"
]
},
{
@@ -1718,7 +1726,7 @@
"outputs": [],
"source": [
"# get all keys\n",
- "keys = Neuron.fetch('KEY')"
+ "keys = Neuron.fetch(\"KEY\")"
]
},
{
@@ -1728,7 +1736,7 @@
"outputs": [],
"source": [
"# fetch all activities - returned as NumPy array of NumPy arrays\n",
- "activities = (Neuron & keys).fetch('activity')"
+ "activities = (Neuron & keys).fetch(\"activity\")"
]
},
{
@@ -1753,8 +1761,8 @@
"fig, axs = plt.subplots(1, len(activities), figsize=(16, 4))\n",
"for activity, ax in zip(activities, axs.ravel()):\n",
" ax.plot(activity)\n",
- " ax.set_xlabel('Time')\n",
- " ax.set_ylabel('Activity')\n",
+ " ax.set_xlabel(\"Time\")\n",
+ " ax.set_ylabel(\"Activity\")\n",
"\n",
"fig.tight_layout()"
]
@@ -1781,7 +1789,7 @@
"metadata": {},
"outputs": [],
"source": [
- "activity = (Neuron & key).fetch1('activity')"
+ "activity = (Neuron & key).fetch1(\"activity\")"
]
},
{
@@ -1814,8 +1822,8 @@
],
"source": [
"plt.plot(activity)\n",
- "plt.xlabel('Time')\n",
- "plt.ylabel('Activity')\n",
+ "plt.xlabel(\"Time\")\n",
+ "plt.ylabel(\"Activity\")\n",
"plt.xlim([0, 300])"
]
},
@@ -1858,11 +1866,11 @@
"threshold = 0.5\n",
"\n",
"# find activity above threshold\n",
- "above_thrs = (activity > threshold).astype(np.int) \n",
+ "above_thrs = (activity > threshold).astype(np.int)\n",
"\n",
"plt.plot(activity)\n",
"plt.plot(above_thrs)\n",
- "plt.xlabel('Time')\n",
+ "plt.xlabel(\"Time\")\n",
"plt.xlim([0, 300])"
]
},
@@ -1902,13 +1910,16 @@
}
],
"source": [
- "rising = (np.diff(above_thrs) > 0).astype(np.int) # find rising edge of crossing threshold\n",
- "spikes = np.hstack((0, rising)) # prepend 0 to account for shortening due to np.diff\n",
+ "rising = (np.diff(above_thrs) > 0).astype(\n",
+ " np.int\n",
+ ") # find rising edge of crossing threshold\n",
+ "spikes = np.hstack((0, rising)) # prepend 0 to account for shortening due to np.diff\n",
"\n",
"plt.plot(activity)\n",
"plt.plot(above_thrs)\n",
- "plt.plot(np.where(spikes>0), 1, 'ro'); # plot only spike points\n",
- "plt.xlabel('Time')\n",
+ "plt.plot(np.where(spikes > 0), 1, \"ro\")\n",
+ "# plot only spike points\n",
+ "plt.xlabel(\"Time\")\n",
"plt.xlim([0, 300])"
]
},
@@ -1936,7 +1947,7 @@
}
],
"source": [
- "count = spikes.sum() # compute total spike counts\n",
+ "count = spikes.sum() # compute total spike counts\n",
"count"
]
},
@@ -1968,22 +1979,25 @@
"source": [
"# ENTER YOUR CODE! - try different values of threshold!\n",
"\n",
- "threshold = 0.7 # enter different threshold values here\n",
+ "threshold = 0.7 # enter different threshold values here\n",
"\n",
"# find activity above threshold\n",
- "above_thrs = (activity > threshold).astype(np.int) \n",
+ "above_thrs = (activity > threshold).astype(np.int)\n",
"\n",
- "rising = (np.diff(above_thrs) > 0).astype(np.int) # find rising edge of crossing threshold\n",
- "spikes = np.hstack((0, rising)) # prepend 0 to account for shortening due to np.diff\n",
+ "rising = (np.diff(above_thrs) > 0).astype(\n",
+ " np.int\n",
+ ") # find rising edge of crossing threshold\n",
+ "spikes = np.hstack((0, rising)) # prepend 0 to account for shortening due to np.diff\n",
"\n",
- "count = spikes.sum() # compute total spike counts\n",
+ "count = spikes.sum() # compute total spike counts\n",
"\n",
"\n",
"plt.plot(activity)\n",
"plt.plot(above_thrs)\n",
- "plt.plot(np.where(spikes>0), 1, 'ro'); # plot only spike points\n",
- "plt.xlabel('Time')\n",
- "plt.title('Total spike counts: {}'.format(count));"
+ "plt.plot(np.where(spikes > 0), 1, \"ro\")\n",
+ "# plot only spike points\n",
+ "plt.xlabel(\"Time\")\n",
+ "plt.title(\"Total spike counts: {}\".format(count));"
]
},
{
@@ -2112,7 +2126,7 @@
" spikes: longblob # detected spikes\n",
" count: int # total number of detected spikes\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Waveform(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -2172,7 +2186,7 @@
" spikes: longblob # detected spikes\n",
" count: int # total number of detected spikes\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Waveform(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -2180,33 +2194,41 @@
" ---\n",
" waveform: longblob # waveform extracted from this spike\n",
" \"\"\"\n",
- " \n",
+ "\n",
" def make(self, key):\n",
- " print('Populating for: ', key)\n",
+ " print(\"Populating for: \", key)\n",
"\n",
- " activity = (Neuron() & key).fetch1('activity')\n",
- " threshold = (SpikeDetectionParam() & key).fetch1('threshold')\n",
+ " activity = (Neuron() & key).fetch1(\"activity\")\n",
+ " threshold = (SpikeDetectionParam() & key).fetch1(\"threshold\")\n",
"\n",
- " above_thrs = (activity > threshold).astype(np.int) # find activity above threshold\n",
- " rising = (np.diff(above_thrs) > 0).astype(np.int) # find rising edge of crossing threshold\n",
- " spikes = np.hstack((0, rising)) # prepend 0 to account for shortening due to np.diff\n",
+ " above_thrs = (activity > threshold).astype(\n",
+ " np.int\n",
+ " ) # find activity above threshold\n",
+ " rising = (np.diff(above_thrs) > 0).astype(\n",
+ " np.int\n",
+ " ) # find rising edge of crossing threshold\n",
+ " spikes = np.hstack(\n",
+ " (0, rising)\n",
+ " ) # prepend 0 to account for shortening due to np.diff\n",
"\n",
- " count = spikes.sum() # compute total spike counts\n",
- " print('Detected {} spikes!\\n'.format(count))\n",
+ " count = spikes.sum() # compute total spike counts\n",
+ " print(\"Detected {} spikes!\\n\".format(count))\n",
"\n",
" # create and insert a new dictionary containing `key` and additionally `spikes` and `count`\n",
- " self.insert1(dict(key, spikes=spikes, count=count)) \n",
- " \n",
+ " self.insert1(dict(key, spikes=spikes, count=count))\n",
+ "\n",
" # extract waveform for the `Waveform` part-table\n",
- " before_spk, after_spk = 40, 40 # extract 40 sample points before and after a spike as the waveform\n",
- " for spk_id, spk in enumerate(np.where(spikes==1)[0]):\n",
- " \n",
+ " before_spk, after_spk = (\n",
+ " 40,\n",
+ " 40,\n",
+ " ) # extract 40 sample points before and after a spike as the waveform\n",
+ " for spk_id, spk in enumerate(np.where(spikes == 1)[0]):\n",
" # For simplicity, skip the spikes too close to the beginning or the end\n",
- " if spk - before_spk < 0 or spk + after_spk > len(activity) + 1: \n",
+ " if spk - before_spk < 0 or spk + after_spk > len(activity) + 1:\n",
" continue\n",
- " \n",
- " wf = activity[spk - before_spk: spk + after_spk] \n",
- " \n",
+ "\n",
+ " wf = activity[spk - before_spk : spk + after_spk]\n",
+ "\n",
" # create and insert a new dictionary containing `key` and additionally `spike_id` and `waveform`\n",
" self.Waveform.insert1(dict(key, spike_id=spk_id, waveform=wf))"
]
@@ -2923,7 +2945,7 @@
],
"source": [
"# ENTER YOUR CODE! - Now, build a query for the waveforms from mouse 100, session on \"2017-05-25\", with detection param 0\n",
- "Spikes.Waveform & 'mouse_id = 100' & 'session_date = \"2017-05-25\"' & 'sdp_id = 0'"
+ "Spikes.Waveform & \"mouse_id = 100\" & 'session_date = \"2017-05-25\"' & \"sdp_id = 0\""
]
},
{
@@ -2933,7 +2955,9 @@
"outputs": [],
"source": [
"# ENTER YOUR CODE! - try fetching all the waveforms\n",
- "waveforms = (Spikes.Waveform & 'mouse_id = 100' & 'session_date = \"2017-05-25\"' & 'sdp_id = 0').fetch('waveform')"
+ "waveforms = (\n",
+ " Spikes.Waveform & \"mouse_id = 100\" & 'session_date = \"2017-05-25\"' & \"sdp_id = 0\"\n",
+ ").fetch(\"waveform\")"
]
},
{
@@ -3326,7 +3350,7 @@
}
],
"source": [
- "(Spikes & 'sdp_id = 0').delete()"
+ "(Spikes & \"sdp_id = 0\").delete()"
]
},
{
@@ -3425,7 +3449,7 @@
}
],
"source": [
- "SpikeDetectionParam() & 'sdp_id = 0'"
+ "SpikeDetectionParam() & \"sdp_id = 0\""
]
},
{
@@ -3447,7 +3471,7 @@
}
],
"source": [
- "(SpikeDetectionParam() & 'sdp_id = 0').delete()"
+ "(SpikeDetectionParam() & \"sdp_id = 0\").delete()"
]
},
{
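A note on the spike-detection cells above: the logic binarizes the trace and then marks every 0→1 transition as a spike. Separately, `np.int` (kept as-is throughout this diff) was deprecated in NumPy 1.20 and removed in 1.24; a self-contained version of the same logic on synthetic data uses the built-in `int` instead:

```python
import numpy as np

rng = np.random.default_rng(0)
activity = rng.random(1000)  # stand-in for a fetched fluorescence trace
threshold = 0.95

above_thrs = (activity > threshold).astype(int)  # binarize the trace
rising = (np.diff(above_thrs) > 0).astype(int)  # 0 -> 1 transitions = crossings
spikes = np.hstack((0, rising))  # prepend 0: np.diff shortens by one sample
print("Detected {} spikes".format(spikes.sum()))
```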
diff --git a/db-course/000-Connect.ipynb b/db-course/000-Connect.ipynb
index b1e89da..4eaa7cb 100644
--- a/db-course/000-Connect.ipynb
+++ b/db-course/000-Connect.ipynb
@@ -15,9 +15,9 @@
"metadata": {},
"outputs": [],
"source": [
- "dj.config['database.host'] = '127.0.0.1'\n",
- "dj.config['database.user'] = 'root'\n",
- "dj.config['database.password'] = 'simple'"
+ "dj.config[\"database.host\"] = \"127.0.0.1\"\n",
+ "dj.config[\"database.user\"] = \"root\"\n",
+ "dj.config[\"database.password\"] = \"simple\""
]
},
{
@@ -44,7 +44,7 @@
}
],
"source": [
- "schema = dj.Schema('university')"
+ "schema = dj.Schema(\"university\")"
]
},
{
@@ -247,8 +247,9 @@
"outputs": [],
"source": [
"from faker import Faker\n",
+ "\n",
"faker = Faker()\n",
- "from tqdm import tqdm "
+ "from tqdm import tqdm"
]
},
{
@@ -265,7 +266,10 @@
}
],
"source": [
- "Person.insert((i, faker.first_name(), faker.last_name(), faker.date_of_birth()) for i in tqdm(range(1000,2000)))"
+ "Person.insert(\n",
+ " (i, faker.first_name(), faker.last_name(), faker.date_of_birth())\n",
+ " for i in tqdm(range(1000, 2000))\n",
+ ")"
]
},
{
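The last hunk above passes a generator expression straight to `insert()`, which is how DataJoint bulk-inserts without materializing a list first. The same pattern under the same assumptions (the `Person` table from this notebook; the ID range here is made up to avoid colliding with existing rows):

```python
from faker import Faker
from tqdm import tqdm

faker = Faker()

# any iterable of tuples (or dicts) works; rows stream into a single insert
Person.insert(
    (i, faker.first_name(), faker.last_name(), faker.date_of_birth())
    for i in tqdm(range(2000, 3000))
)
```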
diff --git a/db-course/000-ConnectCursors.ipynb b/db-course/000-ConnectCursors.ipynb
index ef09deb..a8daa72 100644
--- a/db-course/000-ConnectCursors.ipynb
+++ b/db-course/000-ConnectCursors.ipynb
@@ -28,17 +28,16 @@
"source": [
"import pymysql\n",
"\n",
- "creds = {'user': 'root', 'password': 'simple', 'host':'127.0.0.1'}\n",
+ "creds = {\"user\": \"root\", \"password\": \"simple\", \"host\": \"127.0.0.1\"}\n",
"\n",
"# establish a database connection\n",
"conn = pymysql.connect(\n",
- " host=creds['host'], \n",
- " user=creds['user'], \n",
- " passwd=creds['password'],\n",
+ " host=creds[\"host\"],\n",
+ " user=creds[\"user\"],\n",
+ " passwd=creds[\"password\"],\n",
" autocommit=True,\n",
")\n",
- "cursor = conn.cursor(\n",
- " cursor=pymysql.cursors.DictCursor)\n",
+ "cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n",
"cursor.execute(\"CREATE SCHEMA IF NOT EXISTS university\")"
]
},
@@ -48,12 +47,12 @@
"metadata": {},
"outputs": [],
"source": [
- "s = '''\n",
+ "s = \"\"\"\n",
"Yesterday, \n",
"all my troubles seemed so far away.\n",
"Now it seems as though they're here to stay.\n",
"Oh, how I long for yesterday.\n",
- "'''"
+ "\"\"\""
]
},
{
@@ -82,7 +81,7 @@
}
],
"source": [
- "cursor.execute('CREATE schema university')"
+ "cursor.execute(\"CREATE schema university\")"
]
},
{
@@ -111,8 +110,9 @@
}
],
"source": [
- "cursor.execute('USE university')\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\"USE university\")\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"CREATE TABLE person(\n",
"person_id int not NULL,\n",
"first_name varchar(30) NOT NULL,\n",
@@ -130,10 +130,13 @@
" (2, \"Jane\", \"Doe\", \"2002-02-02\"),\n",
" (3, \"John\", \"Smith\", \"2003-03-01\"),\n",
" (4, \"John\", \"Wick\", \"1979-12-02\")\n",
- "\"\"\")\n",
- "cursor.execute(\"\"\"\n",
+ "\"\"\"\n",
+ ")\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
" SELECT * FROM person\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -145,28 +148,39 @@
"for rec in cursor:\n",
" print(rec)\n",
"cursor.fetchall()\n",
- "# insert \n",
+ "# insert\n",
"cursor.execute(\n",
" \"\"\"\n",
" INSERT INTO person \n",
" (person_id, first_name, last_name, date_of_birth) VALUES\n",
" (%s, %s, %s, %s)\n",
- "\"\"\", (5, faker.first_name(), faker.last_name(), faker.date_of_birth()))\n",
+ "\"\"\",\n",
+ " (5, faker.first_name(), faker.last_name(), faker.date_of_birth()),\n",
+ ")\n",
"from tqdm import tqdm\n",
+ "\n",
"for i in tqdm(range(1000, 1200)):\n",
- " cursor.execute(\"\"\"\n",
+ " cursor.execute(\n",
+ " \"\"\"\n",
" INSERT INTO \n",
" person (person_id, first_name, last_name, date_of_birth) \n",
" VALUES (%s, %s, %s, %s)\n",
- " \"\"\", (i, faker.first_name(), faker.last_name(), faker.date_of_birth()))\n",
- "cursor.execute(\"\"\"\n",
+ " \"\"\",\n",
+ " (i, faker.first_name(), faker.last_name(), faker.date_of_birth()),\n",
+ " )\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"SELECT * FROM person\n",
- "\"\"\")\n",
+ "\"\"\"\n",
+ ")\n",
"cursor.fetchall()\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"DROP TABLE dimitri_test.fake_person\n",
- "\"\"\")\n",
+ "\"\"\"\n",
+ ")\n",
"import datetime\n",
+ "\n",
"cursor.execute(\"\"\"USE dimitri_test\"\"\")\n",
"cursor.execute(\"\"\"SELECT * FROM fake_person\"\"\")\n",
"cursor.fetchone()\n",
@@ -175,18 +189,24 @@
"for rec in cursor:\n",
" print(rec)\n",
"import datetime\n",
- "faker.date_between(datetime.date(2018, 2, 3), 'today')\n",
- "cursor.execute(\"\"\"\n",
+ "\n",
+ "faker.date_between(datetime.date(2018, 2, 3), \"today\")\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"drop table fake_death\n",
- "\"\"\")\n",
- "cursor.execute(\"\"\"\n",
+ "\"\"\"\n",
+ ")\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"CREATE TABLE dimitri_test.fake_death(\n",
" person_id int not null,\n",
" date_of_death date NOT NULL,\n",
" primary key(person_id), \n",
" foreign key (person_id) REFERENCES dimitri_test.fake_person (person_id))\n",
- "\"\"\")\n",
- "cursor.execute(\"\"\"\n",
+ "\"\"\"\n",
+ ")\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"CREATE TABLE hotel_reserviation(\n",
" \n",
" hotel varchar(20) not null\n",
@@ -197,8 +217,10 @@
" unique index (person_id, reservation_date), \n",
" primary key (hotel, room, reservation_date),\n",
" foreign key (person_id) references fake_person(person_id)\n",
- "\"\"\")\n",
- "cursor.execute(\"\"\"\n",
+ "\"\"\"\n",
+ ")\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"CREATE TABLE bank_account (\n",
" \n",
" bank_id int not null, \n",
@@ -206,9 +228,11 @@
" \n",
" primary key(bank_id, account)\n",
"\n",
- "\"\"\")\n",
+ "\"\"\"\n",
+ ")\n",
"\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"CREATE TABLE bank_account_owner (\n",
" \n",
" bank_id int not null, \n",
@@ -219,30 +243,50 @@
" foreign key (person_id) references fake_person(person_id),\n",
" foreign key (bank_id, account) references fake_person(bank_id, account),\n",
"\n",
- "\"\"\")\n",
+ "\"\"\"\n",
+ ")\n",
"\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"SELECT * FROM fake_person\n",
- "\"\"\")\n",
+ "\"\"\"\n",
+ ")\n",
"cursor.fetchone()\n",
"cursor.execute(\"\"\"INSERT into fake_death (date_of_death) values ('2020-10-09')\"\"\")\n",
- "cursor.execute(\"\"\"INSERT into fake_death (person_id, date_of_death) values (1000, '2020-09-09')\"\"\")\n",
- "persons = cursor.execute(\"\"\"SELECT person_id, date_of_birth FROM dimitri_test.fake_person\"\"\")\n",
+ "cursor.execute(\n",
+ " \"\"\"INSERT into fake_death (person_id, date_of_death) values (1000, '2020-09-09')\"\"\"\n",
+ ")\n",
+ "persons = cursor.execute(\n",
+ " \"\"\"SELECT person_id, date_of_birth FROM dimitri_test.fake_person\"\"\"\n",
+ ")\n",
"for rec in cursor.fetchall():\n",
- " cursor.execute(\"\"\"\n",
+ " cursor.execute(\n",
+ " \"\"\"\n",
" INSERT INTO dimitri_test.fake_death (person_id, date_of_death) VALUES (%s, %s)\n",
- " \"\"\", (rec['person_id'], faker.date_between(rec['date_of_birth'], rec['date_of_birth'] + datetime.timedelta(days=40000))))\n",
- " \n",
+ " \"\"\",\n",
+ " (\n",
+ " rec[\"person_id\"],\n",
+ " faker.date_between(\n",
+ " rec[\"date_of_birth\"],\n",
+ " rec[\"date_of_birth\"] + datetime.timedelta(days=40000),\n",
+ " ),\n",
+ " ),\n",
+ " )\n",
+ "\n",
"\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"SELECT first_name, floor(DATEDIFF(date_of_death, date_of_birth)/365.25) as died_at\n",
- "FROM dimitri_test.fake_person NATURAL JOIN dimitri_test.fake_death\"\"\")\n",
+ "FROM dimitri_test.fake_person NATURAL JOIN dimitri_test.fake_death\"\"\"\n",
+ ")\n",
"\n",
"for rec in cursor:\n",
" print(rec)\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"DROP TABLE dimitri_test.fake_death\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
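The pymysql hunks above repay one distilled observation: values always travel through `%s` placeholders plus a parameter tuple, never through string formatting, so the driver handles quoting and escaping. A minimal sketch against the same `university.person` table (the inserted row is made up for illustration):

```python
import pymysql

conn = pymysql.connect(host="127.0.0.1", user="root", passwd="simple", autocommit=True)
cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
cursor.execute("USE university")

# the driver escapes each value in the parameter tuple
cursor.execute(
    "INSERT INTO person (person_id, first_name, last_name, date_of_birth) "
    "VALUES (%s, %s, %s, %s)",
    (6, "Ada", "Lovelace", "1815-12-10"),
)
cursor.execute("SELECT * FROM person WHERE person_id = %s", (6,))
print(cursor.fetchone())  # DictCursor returns the row as a dict
```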
diff --git a/db-course/000-ConnectSQL.ipynb b/db-course/000-ConnectSQL.ipynb
index ea3bd55..ecd1ed6 100644
--- a/db-course/000-ConnectSQL.ipynb
+++ b/db-course/000-ConnectSQL.ipynb
@@ -7,6 +7,7 @@
"outputs": [],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()"
]
},
diff --git a/db-course/001-FakeIt.ipynb b/db-course/001-FakeIt.ipynb
index 96f15bf..f243640 100644
--- a/db-course/001-FakeIt.ipynb
+++ b/db-course/001-FakeIt.ipynb
@@ -112,7 +112,7 @@
"metadata": {},
"outputs": [],
"source": [
- "creds = {'user': 'root', 'password': 'simple', 'host':'127.0.0.1'}"
+ "creds = {\"user\": \"root\", \"password\": \"simple\", \"host\": \"127.0.0.1\"}"
]
},
{
@@ -145,9 +145,9 @@
"\n",
"# establish a database connection\n",
"conn = pymysql.connect(\n",
- " host=creds['host'], \n",
- " user=creds['user'], \n",
- " passwd=creds['password'],\n",
+ " host=creds[\"host\"],\n",
+ " user=creds[\"user\"],\n",
+ " passwd=creds[\"password\"],\n",
" autocommit=True,\n",
")"
]
@@ -158,8 +158,7 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor = conn.cursor(\n",
- " cursor=pymysql.cursors.DictCursor)"
+ "cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)"
]
},
{
@@ -188,12 +187,12 @@
"metadata": {},
"outputs": [],
"source": [
- "s = '''\n",
+ "s = \"\"\"\n",
"Yesterday, \n",
"all my troubles seemed so far away.\n",
"Now it seems as though they're here to stay.\n",
"Oh, how I long for yesterday.\n",
- "'''"
+ "\"\"\""
]
},
{
@@ -213,7 +212,7 @@
}
],
"source": [
- "cursor.execute('USE university')"
+ "cursor.execute(\"USE university\")"
]
},
{
@@ -233,7 +232,8 @@
}
],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"CREATE TABLE person(\n",
"person_id int not NULL,\n",
"first_name varchar(30) NOT NULL,\n",
@@ -278,7 +278,8 @@
" (2, \"Jane\", \"Doe\", \"2002-02-02\"),\n",
" (3, \"John\", \"Smith\", \"2003-03-01\"),\n",
" (4, \"John\", \"Wick\", \"1979-12-02\")\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -298,9 +299,11 @@
}
],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
" SELECT * FROM person\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -361,13 +364,15 @@
}
],
"source": [
- "# insert \n",
+ "# insert\n",
"cursor.execute(\n",
" \"\"\"\n",
" INSERT INTO person \n",
" (person_id, first_name, last_name, date_of_birth) VALUES\n",
" (%s, %s, %s, %s)\n",
- "\"\"\", (5, faker.first_name(), faker.last_name(), faker.date_of_birth()))"
+ "\"\"\",\n",
+ " (5, faker.first_name(), faker.last_name(), faker.date_of_birth()),\n",
+ ")"
]
},
{
@@ -394,11 +399,14 @@
],
"source": [
"for i in tqdm(range(1000, 1200)):\n",
- " cursor.execute(\"\"\"\n",
+ " cursor.execute(\n",
+ " \"\"\"\n",
" INSERT INTO \n",
" person (person_id, first_name, last_name, date_of_birth) \n",
" VALUES (%s, %s, %s, %s)\n",
- " \"\"\", (i, faker.first_name(), faker.last_name(), faker.date_of_birth()))"
+ " \"\"\",\n",
+ " (i, faker.first_name(), faker.last_name(), faker.date_of_birth()),\n",
+ " )"
]
},
{
@@ -418,9 +426,11 @@
}
],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"SELECT * FROM person\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -449,9 +459,11 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"DROP TABLE dimitri_test.fake_person\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -525,7 +537,7 @@
"metadata": {},
"outputs": [],
"source": [
- "faker.date_between(datetime.date(2018, 2, 3), 'today')"
+ "faker.date_between(datetime.date(2018, 2, 3), \"today\")"
]
},
{
@@ -534,9 +546,11 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"drop table fake_death\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -545,13 +559,15 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"CREATE TABLE dimitri_test.fake_death(\n",
" person_id int not null,\n",
" date_of_death date NOT NULL,\n",
" primary key(person_id), \n",
" foreign key (person_id) REFERENCES dimitri_test.fake_person (person_id))\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -560,7 +576,8 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"CREATE TABLE hotel_reserviation(\n",
" \n",
" hotel varchar(20) not null\n",
@@ -571,7 +588,8 @@
" unique index (person_id, reservation_date), \n",
" primary key (hotel, room, reservation_date),\n",
" foreign key (person_id) references fake_person(person_id)\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -580,7 +598,8 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"CREATE TABLE bank_account (\n",
" \n",
" bank_id int not null, \n",
@@ -588,9 +607,11 @@
" \n",
" primary key(bank_id, account)\n",
"\n",
- "\"\"\")\n",
+ "\"\"\"\n",
+ ")\n",
"\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"CREATE TABLE bank_account_owner (\n",
" \n",
" bank_id int not null, \n",
@@ -601,7 +622,8 @@
" foreign key (person_id) references fake_person(person_id),\n",
" foreign key (bank_id, account) references fake_person(bank_id, account),\n",
"\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -617,9 +639,11 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"SELECT * FROM fake_person\n",
- "\"\"\")\n",
+ "\"\"\"\n",
+ ")\n",
"cursor.fetchone()"
]
},
@@ -638,7 +662,9 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute(\"\"\"INSERT into fake_death (person_id, date_of_death) values (1000, '2020-09-09')\"\"\")"
+ "cursor.execute(\n",
+ " \"\"\"INSERT into fake_death (person_id, date_of_death) values (1000, '2020-09-09')\"\"\"\n",
+ ")"
]
},
{
@@ -647,12 +673,22 @@
"metadata": {},
"outputs": [],
"source": [
- "persons = cursor.execute(\"\"\"SELECT person_id, date_of_birth FROM dimitri_test.fake_person\"\"\")\n",
+ "persons = cursor.execute(\n",
+ " \"\"\"SELECT person_id, date_of_birth FROM dimitri_test.fake_person\"\"\"\n",
+ ")\n",
"for rec in cursor.fetchall():\n",
- " cursor.execute(\"\"\"\n",
+ " cursor.execute(\n",
+ " \"\"\"\n",
" INSERT INTO dimitri_test.fake_death (person_id, date_of_death) VALUES (%s, %s)\n",
- " \"\"\", (rec['person_id'], faker.date_between(rec['date_of_birth'], rec['date_of_birth'] + datetime.timedelta(days=40000))))\n",
- " "
+ " \"\"\",\n",
+ " (\n",
+ " rec[\"person_id\"],\n",
+ " faker.date_between(\n",
+ " rec[\"date_of_birth\"],\n",
+ " rec[\"date_of_birth\"] + datetime.timedelta(days=40000),\n",
+ " ),\n",
+ " ),\n",
+ " )"
]
},
{
@@ -668,9 +704,11 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"SELECT first_name, floor(DATEDIFF(date_of_death, date_of_birth)/365.25) as died_at\n",
- "FROM dimitri_test.fake_person NATURAL JOIN dimitri_test.fake_death\"\"\")\n",
+ "FROM dimitri_test.fake_person NATURAL JOIN dimitri_test.fake_death\"\"\"\n",
+ ")\n",
"\n",
"for rec in cursor:\n",
" print(rec)"
@@ -682,9 +720,11 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"DROP TABLE dimitri_test.fake_death\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
diff --git a/db-course/002-Tables.ipynb b/db-course/002-Tables.ipynb
index 1b97fdf..de4914d 100644
--- a/db-course/002-Tables.ipynb
+++ b/db-course/002-Tables.ipynb
@@ -69,7 +69,7 @@
"2. For each entity type, enforce 1:1 correspondence between the real-world entity and its representation in the table. How can you do it?\n",
"3. In the real-world, we need to permanently associate a persistent identifier with each entity of the class. The database cannot do it by itself.\n",
"4. The database can use the permanent identifier to enforce uniqueness in the table using a uniqueness constraint.\n",
- "5. A **primary key** is a unique, non-nullable index that is designated as the primary way to identify entitites in a table. Each tables must have a carefully chosen primary key. \n",
+ "5. A **primary key** is a unique, non-nullable index that is designated as the primary way to identify entities in a table. Each tables must have a carefully chosen primary key. \n",
"6. Secondary unique indexes can be nullable.\n",
"\n",
"\n",
@@ -91,7 +91,7 @@
"metadata": {},
"outputs": [],
"source": [
- "schema = dj.Schema('test')"
+ "schema = dj.Schema(\"test\")"
]
},
{
@@ -107,7 +107,7 @@
" ---\n",
" make : varchar(16)\n",
" year : year\n",
- " \"\"\" "
+ " \"\"\""
]
},
{
@@ -116,14 +116,14 @@
"metadata": {},
"outputs": [],
"source": [
- "@schema \n",
+ "@schema\n",
"class Classroom(dj.Manual):\n",
" definition = \"\"\"\n",
" building_code : char(3)\n",
" room_number : smallint unsigned \n",
" ---\n",
" capacity : smallint unsigned\n",
- " \"\"\"\n"
+ " \"\"\""
]
}
],
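The numbered points above on entity integrity come down to one behavior: the attributes declared above the `---` divide form a unique, non-nullable index, so a second insert with the same key is rejected. A sketch using the `Classroom` table defined above (the sample values are made up):

```python
import datajoint as dj

Classroom.insert1(("BIO", 101, 50))  # (building_code, room_number, capacity)

try:
    # same primary key (building_code, room_number): violates entity integrity
    Classroom.insert1(("BIO", 101, 80))
except dj.errors.DuplicateError as err:
    print("uniqueness enforced by the primary key:", err)
```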
diff --git a/db-course/003-ForeignKeys-SQL.ipynb b/db-course/003-ForeignKeys-SQL.ipynb
index 15e479d..f2a059c 100644
--- a/db-course/003-ForeignKeys-SQL.ipynb
+++ b/db-course/003-ForeignKeys-SQL.ipynb
@@ -7,6 +7,7 @@
"outputs": [],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"\n",
"%load_ext sql\n",
diff --git a/db-course/003-ForeignKeys.ipynb b/db-course/003-ForeignKeys.ipynb
index f47e2b1..163925e 100644
--- a/db-course/003-ForeignKeys.ipynb
+++ b/db-course/003-ForeignKeys.ipynb
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Referential integrity"
+ "# Referential integrity\n"
]
},
{
@@ -13,40 +13,41 @@
"source": [
"## Referential integrity\n",
"\n",
- "* Correcting matching of corresponding entities across the schema\n",
- "* Relies on entity integrity\n",
- "* Enfornced by foreign keys\n",
+ "- Correcting matching of corresponding entities across the schema\n",
+ "- Relies on entity integrity\n",
+ "- Enforced by foreign keys\n",
"\n",
"## Foreign keys\n",
- "A foreign key is a column or several columns in the child table referencing the primary key column(s) in the parent table.\n",
"\n",
- "* More generally, foreign keys can reference other sets of columns than the primary key. However, in common practice and in this class foreign keys will always reference the primary key in the referenced table.\n",
+ "A foreign key is a column or several columns in the child table referencing the primary key column(s) in the parent table.\n",
"\n",
+ "- More generally, foreign keys can reference other sets of columns than the primary key. However, in common practice and in this class foreign keys will always reference the primary key in the referenced table.\n",
"\n",
"## Effects of a foreign key constraint\n",
+ "\n",
"1. Restrict inserts into the child table if there is no match in parent.\n",
"2. Restrict deletes (and updates of primary key values) from the parent table when there is a match in child.\n",
"3. An index is created in the child table to speed up searches on the foreign key.\n",
"\n",
"As a result, the child table is prevented from having values in its foreign keys columns in the absence of entries in the parent table with matching primary key values.\n",
"\n",
- "Importantly, unlike other types of links in other data models, no actual link is created between individual rows of both tables. Referential integrity is maintained by restricting dta manipulations."
+ "Importantly, unlike other types of links in other data models, no actual link is created between individual rows of both tables. Referential integrity is maintained by restricting dta manipulations.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Diagramming notation \n",
+ "## Diagramming notation\n",
"\n",
- "- Entity-relationship diagram"
+ "- Entity-relationship diagram\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Examples"
+ "## Examples\n"
]
},
{
@@ -66,7 +67,7 @@
"source": [
"import datajoint as dj\n",
"\n",
- "schema = dj.Schema('person')"
+ "schema = dj.Schema(\"person\")"
]
},
{
@@ -82,7 +83,7 @@
" ---\n",
" full_title : varchar(120)\n",
" \"\"\"\n",
- " \n",
+ "\n",
" contents = [\n",
" (\"SW-Dev1\", \"Software Developer 1\"),\n",
" (\"SW-Dev2\", \"Software Developer 2\"),\n",
@@ -90,7 +91,7 @@
" (\"Web-Dev1\", \"Web Developer 1\"),\n",
" (\"Web-Dev2\", \"Web Developer 2\"),\n",
" (\"Web-Dev3\", \"Web Developer 3\"),\n",
- " (\"HR-Mgr\", \"Human Resources Manager\")\n",
+ " (\"HR-Mgr\", \"Human Resources Manager\"),\n",
" ]"
]
},
@@ -288,8 +289,8 @@
" (\"BIOL\", \"Biology\"),\n",
" (\"MATH\", \"Mathematics\"),\n",
" (\"STAT\", \"Statistics\"),\n",
- " (\"ENG\", \"English\")\n",
- " )"
+ " (\"ENG\", \"English\"),\n",
+ " )"
]
},
{
@@ -420,10 +421,11 @@
"metadata": {},
"source": [
"## Foreign keys have 4 effects\n",
+ "\n",
"0. The primary key of the parent becomes part of the child definition (the foreign key)\n",
"1. Restrict inserts into child table if no match in parent\n",
- "2. Restrict deletes from parent if there is a matching child \n",
- "3. Create an index in child to make searches fast on the value of the FK value."
+ "2. Restrict deletes from parent if there is a matching child\n",
+ "3. Create an index in child to make searches fast on the value of the FK value.\n"
]
},
{
@@ -729,21 +731,21 @@
"metadata": {},
"outputs": [],
"source": [
- "@schema \n",
+ "@schema\n",
"class Language(dj.Lookup):\n",
" definition = \"\"\"\n",
" lang_code : char(8)\n",
" ---\n",
" language : varchar(20)\n",
" \"\"\"\n",
- " \n",
+ "\n",
" contents = [\n",
" (\"Eng\", \"English\"),\n",
" (\"Nav\", \"Navajo\"),\n",
" (\"Fr\", \"French\"),\n",
" (\"It\", \"Italian\"),\n",
" (\"Sp\", \"Spanish\"),\n",
- " (\"Ar\", \"Arabic\") \n",
+ " (\"Ar\", \"Arabic\"),\n",
" ]"
]
},
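The four foreign-key effects enumerated above can be observed with one small child table; a sketch assuming the `schema` and `Language` lookup table from the cells above (the `Word` table is hypothetical, added only for illustration):

```python
import datajoint as dj


@schema
class Word(dj.Manual):
    definition = """
    -> Language          # effect 0: the parent's primary key (lang_code) joins the child's
    word : varchar(30)
    ---
    meaning : varchar(120)
    """


Word.insert1(dict(lang_code="Fr", word="chat", meaning="cat"))  # parent row exists

try:
    Word.insert1(dict(lang_code="De", word="Katze", meaning="cat"))
except dj.errors.IntegrityError:
    print("effect 1: insert restricted -- no matching row in Language")
```

Effect 2 shows up on the parent side: at the SQL level, deleting from `Language` is restricted while matching `Word` rows exist, which is why DataJoint's `delete()` asks for confirmation before cascading.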
diff --git a/db-course/003-Indexes.ipynb b/db-course/003-Indexes.ipynb
index 0d80c80..7ec0d44 100644
--- a/db-course/003-Indexes.ipynb
+++ b/db-course/003-Indexes.ipynb
@@ -14,18 +14,18 @@
"\n",
"In DataJoint, indexes are created by one of the three mechanisms:\n",
"\n",
- "1. Primary key \n",
- "2. Foreign key \n",
+ "1. Primary key\n",
+ "2. Foreign key\n",
"3. Explicitly defined indexes\n",
"\n",
- "The first two mechanisms are obligatory. Every table has a primary key, which serves as an unique index. Therefore, restrictions by a primary key are very fast. Foreign keys create additional indexes unless a suitable index already exists."
+ "The first two mechanisms are obligatory. Every table has a primary key, which serves as an unique index. Therefore, restrictions by a primary key are very fast. Foreign keys create additional indexes unless a suitable index already exists.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "Let's test this principle. Let's create a table with a 10,000 entries and compare lookup times:"
+ "Let's test this principle. Let's create a table with a 10,000 entries and compare lookup times:\n"
]
},
{
@@ -51,16 +51,17 @@
],
"source": [
"import datajoint as dj\n",
- "schema = dj.schema('dimitri_indexes')\n",
- "schema.drop() # drop previous schema definition (if any) and create anew\n",
- "schema = dj.schema('dimitri_indexes')"
+ "\n",
+ "schema = dj.schema(\"dimitri_indexes\")\n",
+ "schema.drop() # drop previous schema definition (if any) and create anew\n",
+ "schema = dj.schema(\"dimitri_indexes\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "Let's say a mouse in the lab has a lab-specific ID but it also has a separate id issued by the animal facility."
+ "Let's say a mouse in the lab has a lab-specific ID but it also has a separate id issued by the animal facility.\n"
]
},
{
@@ -85,11 +86,17 @@
"outputs": [],
"source": [
"import random\n",
+ "\n",
+ "\n",
"def populate_mice(table, n=200_000):\n",
" \"\"\"insert a bunch of mice\"\"\"\n",
" table.insert(\n",
- " ((random.randint(1,1000_000_000), random.randint(1,1000_000_000)) \n",
- " for i in range(n)), skip_duplicates=True)"
+ " (\n",
+ " (random.randint(1, 1000_000_000), random.randint(1, 1000_000_000))\n",
+ " for i in range(n)\n",
+ " ),\n",
+ " skip_duplicates=True,\n",
+ " )"
]
},
{
@@ -232,7 +239,7 @@
"%%timeit -n6 -r3\n",
"\n",
"# efficient! Uses the primary key\n",
- "(Mouse() & {'mouse_id': random.randint(0, 999_999)}).fetch()"
+ "(Mouse() & {\"mouse_id\": random.randint(0, 999_999)}).fetch()"
]
},
{
@@ -252,32 +259,32 @@
"%%timeit -n6 -r3\n",
"\n",
"# inefficient! Requires a full table scan\n",
- "(Mouse() & {'tag_id': random.randint(0, 999_999)}).fetch()"
+ "(Mouse() & {\"tag_id\": random.randint(0, 999_999)}).fetch()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "The indexed searches are much faster!"
+ "The indexed searches are much faster!\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "To make searches faster on fields other than the primary key or a foreign key, you can add a secondary index explicitly. \n",
+ "To make searches faster on fields other than the primary key or a foreign key, you can add a secondary index explicitly.\n",
"\n",
- "Regular indexes are declared as `index(attr1, ..., attrN)` on a separate line anywhere in the table declration (below the primary key divide). \n",
+ "Regular indexes are declared as `index(attr1, ..., attrN)` on a separate line anywhere in the table declaration (below the primary key divide).\n",
"\n",
- "Indexes can be declared with unique constraint as `unique index (attr1, ..., attrN)`."
+ "Indexes can be declared with unique constraint as `unique index (attr1, ..., attrN)`.\n"
]
},
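+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For instance, a declaration of this shape (an illustrative sketch, not a table used below) adds one regular and one unique secondary index below the primary key divide:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@schema\n",
+ "class Example(dj.Manual):\n",
+ " definition = \"\"\"\n",
+ " example_id : int\n",
+ " ---\n",
+ " tag : int\n",
+ " label : varchar(30)\n",
+ " index(label)\n",
+ " unique index(tag)\n",
+ " \"\"\""
+ ]
+ },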
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "Let's redeclare the table with a unique index on `tag_id`."
+ "Let's redeclare the table with a unique index on `tag_id`.\n"
]
},
{
@@ -423,7 +430,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Now both types of searches are equally efficient!"
+ "Now both types of searches are equally efficient!\n"
]
},
{
@@ -442,8 +449,8 @@
"source": [
"%%timeit -n6 -r3\n",
"\n",
- "#efficient! Uses the primary key\n",
- "(Mouse2() & {'mouse_id': random.randint(0, 999_999)}).fetch()"
+ "# efficient! Uses the primary key\n",
+ "(Mouse2() & {\"mouse_id\": random.randint(0, 999_999)}).fetch()"
]
},
{
@@ -462,15 +469,15 @@
"source": [
"%%timeit -n6 -r3\n",
"\n",
- "#efficient! Uses the seconary index on tag_id\n",
- "(Mouse2() & {'tag_id': random.randint(0, 999_999)}).fetch()"
+ "# efficient! Uses the secondary index on tag_id\n",
+ "(Mouse2() & {\"tag_id\": random.randint(0, 999_999)}).fetch()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "Let's now imagine that rats in the `Rat` table are identified by the combination of lab the `lab_name` and `rat_id` in each lab:"
+ "Let's now imagine that rats in the `Rat` table are identified by the combination of lab the `lab_name` and `rat_id` in each lab:\n"
]
},
{
@@ -497,11 +504,14 @@
"source": [
"def populate_rats(table):\n",
" lab_names = (\"Cajal\", \"Kandel\", \"Moser\", \"Wiesel\")\n",
- " for date_of_birth in (None, \"2019-10-01\", \n",
- " \"2019-10-02\", \"2019-10-03\", \"2019-10-04\"):\n",
- " table.insert((\n",
- " (random.choice(lab_names), random.randint(1, 1_000_000), date_of_birth) \n",
- " for i in range(100_000)), skip_duplicates=True)"
+ " for date_of_birth in (None, \"2019-10-01\", \"2019-10-02\", \"2019-10-03\", \"2019-10-04\"):\n",
+ " table.insert(\n",
+ " (\n",
+ " (random.choice(lab_names), random.randint(1, 1_000_000), date_of_birth)\n",
+ " for i in range(100_000)\n",
+ " ),\n",
+ " skip_duplicates=True,\n",
+ " )"
]
},
{
@@ -646,14 +656,14 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Note that dispite the fact that `rat_id` is in the index, search by `rat_id` alone are not helped by the index because it is not first in the index. This is similar to search for a word in a dictionary that orders words alphabetically. Searching by the first letters of a word is easy but searching by the last few letters of a word requires scanning the whole dictionary."
+ "Note that despite the fact that `rat_id` is in the index, search by `rat_id` alone are not helped by the index because it is not first in the index. This is similar to search for a word in a dictionary that orders words alphabetically. Searching by the first letters of a word is easy but searching by the last few letters of a word requires scanning the whole dictionary.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "In this table, the primary key is a unique index on the combination `(lab_id, rat_id)`. Therefore searches on these attributes or on `lab_id` alone are fast. But this index cannot help searches on `rat_id` alone:"
+ "In this table, the primary key is a unique index on the combination `(lab_id, rat_id)`. Therefore searches on these attributes or on `lab_id` alone are fast. But this index cannot help searches on `rat_id` alone:\n"
]
},
{
@@ -673,7 +683,7 @@
"%%timeit -n2 -r10\n",
"\n",
"# inefficient! Requires full table scan.\n",
- "(Rat() & {'rat_id': 300}).fetch()"
+ "(Rat() & {\"rat_id\": 300}).fetch()"
]
},
{
@@ -693,7 +703,7 @@
"%%timeit -n2 -r10\n",
"\n",
"# efficient! Uses the primary key\n",
- "(Rat() & {'rat_id': 300, 'lab_name': 'Cajal'}).fetch()"
+ "(Rat() & {\"rat_id\": 300, \"lab_name\": \"Cajal\"}).fetch()"
]
},
{
@@ -713,14 +723,14 @@
"%%timeit -n2 -r10\n",
"\n",
"# inefficient! Requires a full table scan\n",
- "len(Rat & {'rat_id': 500})"
+ "len(Rat & {\"rat_id\": 500})"
]
},
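+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can also ask MySQL directly whether an index can be used with `EXPLAIN` (a quick sketch; the exact output columns depend on the server version):\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# the 'key' column is NULL for the full scan on rat_id,\n",
+ "# but names the primary key for the lab_name search\n",
+ "print(\n",
+ " schema.connection.query(\n",
+ " f\"EXPLAIN SELECT * FROM {Rat.full_table_name} WHERE rat_id=300\"\n",
+ " ).fetchall()\n",
+ ")\n",
+ "print(\n",
+ " schema.connection.query(\n",
+ " f\"EXPLAIN SELECT * FROM {Rat.full_table_name} WHERE lab_name='Cajal'\"\n",
+ " ).fetchall()\n",
+ ")"
+ ]
+ },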
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "Pattern searches in strings can benefit from an index when the starting characters are specified."
+ "Pattern searches in strings can benefit from an index when the starting characters are specified.\n"
]
},
{
@@ -767,7 +777,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Similarly, searching by the date requires an inefficient full-table scan:"
+ "Similarly, searching by the date requires an inefficient full-table scan:\n"
]
},
{
@@ -793,7 +803,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "To speed up searches by the `rat_id` and `date_of_birth`, we can explicit indexes to `Rat`:"
+ "To speed up searches by the `rat_id` and `date_of_birth`, we can explicit indexes to `Rat`:\n"
]
},
{
@@ -841,7 +851,7 @@
"%%timeit -n3 -r6\n",
"\n",
"# efficient! uses index on rat_id\n",
- "(Rat2() & {'rat_id': 300}).fetch()"
+ "(Rat2() & {\"rat_id\": 300}).fetch()"
]
},
{
@@ -868,7 +878,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "#### Quiz: How many indexes does the table `Rat` have?"
+ "#### Quiz: How many indexes does the table `Rat` have?\n"
]
},
{
@@ -891,14 +901,14 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Three: primary key, rat_id, date_of_birth"
+ "Three: primary key, rat_id, date_of_birth\n"
]
},
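+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "One way to check: print the definition back with `describe`, which lists the declared indexes along with the primary key:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(Rat2.describe())"
+ ]
+ },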
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Indexes in SQL"
+ "# Indexes in SQL\n"
]
},
{
@@ -908,11 +918,11 @@
"outputs": [],
"source": [
"import json\n",
- "import pymysql \n",
+ "import pymysql\n",
"\n",
"pymysql.install_as_MySQLdb()\n",
"\n",
- "with open('cred.json') as f:\n",
+ "with open(\"cred.json\") as f:\n",
" creds = json.load(f)\n",
"\n",
"connection_string = \"mysql://{user}:{password}@{host}\".format(**creds)\n",
diff --git a/db-course/003-UUID.ipynb b/db-course/003-UUID.ipynb
index 257830c..f1f3c37 100644
--- a/db-course/003-UUID.ipynb
+++ b/db-course/003-UUID.ipynb
@@ -218,7 +218,7 @@
"metadata": {},
"outputs": [],
"source": [
- "top = uuid.UUID('00000000-0000-0000-0000-000000000000')\n"
+ "top = uuid.UUID(\"00000000-0000-0000-0000-000000000000\")"
]
},
{
@@ -258,7 +258,7 @@
}
],
"source": [
- "topic = uuid.uuid5(top, 'Neuroscience')\n",
+ "topic = uuid.uuid5(top, \"Neuroscience\")\n",
"topic"
]
},
@@ -279,7 +279,7 @@
}
],
"source": [
- "subject1 = uuid.uuid5(topic, 'Habenula')\n",
+ "subject1 = uuid.uuid5(topic, \"Habenula\")\n",
"subject1"
]
},
@@ -304,14 +304,14 @@
}
],
"source": [
- "top = uuid.UUID('00000000-0000-0000-0000-000000000000')\n",
- "topic = uuid.uuid5(top, 'Neuroscience')\n",
- "subject1 = uuid.uuid5(topic, 'Habenula')\n",
- "subject2 = uuid.uuid5(topic, 'Entorhinal cortex')\n",
- "subject3 = uuid.uuid5(topic, 'Habenula')\n",
+ "top = uuid.UUID(\"00000000-0000-0000-0000-000000000000\")\n",
+ "topic = uuid.uuid5(top, \"Neuroscience\")\n",
+ "subject1 = uuid.uuid5(topic, \"Habenula\")\n",
+ "subject2 = uuid.uuid5(topic, \"Entorhinal cortex\")\n",
+ "subject3 = uuid.uuid5(topic, \"Habenula\")\n",
"\n",
- "topic = uuid.uuid5(top, 'Philosophy')\n",
- "subject4 = uuid.uuid5(topic, 'Habenula')\n",
+ "topic = uuid.uuid5(top, \"Philosophy\")\n",
+ "subject4 = uuid.uuid5(topic, \"Habenula\")\n",
"\n",
"topic, subject1, subject2, subject3, subject4"
]
@@ -333,7 +333,7 @@
}
],
"source": [
- "uuid.uuid5(subject4, 'study'*1000000)"
+ "uuid.uuid5(subject4, \"study\" * 1000000)"
]
},
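+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Because `uuid5` is a pure hash of the namespace and the name, recomputing it with equal inputs always yields the same value:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# same namespace + same name => same UUID, on any machine, at any time\n",
+ "assert uuid.uuid5(top, \"Neuroscience\") == uuid.uuid5(top, \"Neuroscience\")\n",
+ "assert subject1 == subject3 # identical inputs above\n",
+ "assert subject1 != subject4 # same name under a different namespace"
+ ]
+ },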
{
@@ -413,6 +413,7 @@
],
"source": [
"import datajoint as dj\n",
+ "\n",
"dj.__version__"
]
},
@@ -431,7 +432,7 @@
}
],
"source": [
- "schema = dj.Schema('uuid')"
+ "schema = dj.Schema(\"uuid\")"
]
},
{
@@ -486,8 +487,8 @@
}
],
"source": [
- "# For the curious: Internally, DataJoint represents uuids as BINARY(16) \n",
- "Message.heading['message_id'].sql"
+ "# For the curious: Internally, DataJoint represents uuids as BINARY(16)\n",
+ "Message.heading[\"message_id\"].sql"
]
},
{
@@ -496,8 +497,8 @@
"metadata": {},
"outputs": [],
"source": [
- "Message.insert1((uuid.uuid1(), 'Hello, world!'))\n",
- "Message.insert1((uuid.uuid1(), 'Cogito ergo sum'))"
+ "Message.insert1((uuid.uuid1(), \"Hello, world!\"))\n",
+ "Message.insert1((uuid.uuid1(), \"Cogito ergo sum\"))"
]
},
{
@@ -617,8 +618,8 @@
"metadata": {},
"outputs": [],
"source": [
- "Message.insert1((uuid.uuid1(), 'I will be back'))\n",
- "Message.insert1((uuid.uuid1(), 'Must destroy humans.'))"
+ "Message.insert1((uuid.uuid1(), \"I will be back\"))\n",
+ "Message.insert1((uuid.uuid1(), \"Must destroy humans.\"))"
]
},
{
@@ -779,7 +780,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Message.insert1((uuid.uuid4(), 'Hasta la vista baby'))"
+ "Message.insert1((uuid.uuid4(), \"Hasta la vista baby\"))"
]
},
{
@@ -933,7 +934,7 @@
],
"source": [
"# For the curious: This is how the table was declared in SQL\n",
- "print(schema.connection.query('show create table `uuid`.`comment`').fetchall()[0][1])"
+ "print(schema.connection.query(\"show create table `uuid`.`comment`\").fetchall()[0][1])"
]
},
{
@@ -993,7 +994,7 @@
"metadata": {},
"outputs": [],
"source": [
- "keys = Message.fetch('KEY')"
+ "keys = Message.fetch(\"KEY\")"
]
},
{
diff --git a/db-course/004-DatabaseHotel.ipynb b/db-course/004-DatabaseHotel.ipynb
index afda88f..5027878 100644
--- a/db-course/004-DatabaseHotel.ipynb
+++ b/db-course/004-DatabaseHotel.ipynb
@@ -57,7 +57,8 @@
],
"source": [
"import datajoint as dj\n",
- "schema = dj.Schema('hotel')"
+ "\n",
+ "schema = dj.Schema(\"hotel\")"
]
},
{
@@ -282,6 +283,7 @@
"import random\n",
"import datetime\n",
"import tqdm\n",
+ "\n",
"fake = faker.Faker()"
]
},
@@ -293,7 +295,7 @@
"outputs": [],
"source": [
"# populate rooms\n",
- "Room.insert((i, \"Deluxe\" if i%2 else \"Suite\") for i in range(80))"
+ "Room.insert((i, \"Deluxe\" if i % 2 else \"Suite\") for i in range(80))"
]
},
{
@@ -316,12 +318,11 @@
"days = 45\n",
"\n",
"for day in tqdm.tqdm(range(days)):\n",
- " price = random.randint(40, 350) \n",
+ " price = random.randint(40, 350)\n",
" RoomAvailable.insert(\n",
- " dict(key, \n",
- " date=start_date + datetime.timedelta(days=day), \n",
- " price=price) for key in Room.fetch('KEY'))\n",
- " "
+ " dict(key, date=start_date + datetime.timedelta(days=day), price=price)\n",
+ " for key in Room.fetch(\"KEY\")\n",
+ " )"
]
},
{
@@ -339,12 +340,28 @@
"metadata": {},
"outputs": [],
"source": [
- "class HotelException(Exception): pass\n",
- "class RoomUnavailable(HotelException): pass\n",
- "class RoomAlreadyReserved(HotelException): pass\n",
- "class AlreadyChecked(HotelException): pass\n",
- "class NoReservation(HotelException): pass\n",
- "class NotCheckedIn(HotelException): pass"
+ "class HotelException(Exception):\n",
+ " pass\n",
+ "\n",
+ "\n",
+ "class RoomUnavailable(HotelException):\n",
+ " pass\n",
+ "\n",
+ "\n",
+ "class RoomAlreadyReserved(HotelException):\n",
+ " pass\n",
+ "\n",
+ "\n",
+ "class AlreadyChecked(HotelException):\n",
+ " pass\n",
+ "\n",
+ "\n",
+ "class NoReservation(HotelException):\n",
+ " pass\n",
+ "\n",
+ "\n",
+ "class NotCheckedIn(HotelException):\n",
+ " pass"
]
},
{
@@ -356,19 +373,18 @@
"source": [
"def reserve_room(room, date, guest_name, credit_card):\n",
" # lookup guest by name\n",
- " keys = (Guest & {'guest_name': guest_name}).fetch('KEY')\n",
- " \n",
+ " keys = (Guest & {\"guest_name\": guest_name}).fetch(\"KEY\")\n",
+ "\n",
" if keys:\n",
" # if multiple found, use the first, for example\n",
- " key = keys[0] \n",
+ " key = keys[0]\n",
" else:\n",
" # if not registered before, create a new guest id\n",
- " key = dict(guest_id=random.randint(0, 2**32-1))\n",
+ " key = dict(guest_id=random.randint(0, 2**32 - 1))\n",
" Guest.insert1(dict(key, guest_name=guest_name))\n",
- " \n",
+ "\n",
" try:\n",
- " Reservation.insert1(\n",
- " dict(key, room=room, date=date, credit_card=credit_card))\n",
+ " Reservation.insert1(dict(key, room=room, date=date, credit_card=credit_card))\n",
" except dj.errors.DuplicateError:\n",
" raise RoomAlreadyReserved(room, date.isoformat()) from None\n",
" except dj.errors.IntegrityError:\n",
@@ -1453,18 +1469,24 @@
"\n",
"for i in tqdm.tqdm(range(number_of_guests)):\n",
" guest = fake.name()\n",
- " credit_card=' '.join((fake.credit_card_number(), \n",
- " fake.credit_card_expire(), \n",
- " fake.credit_card_security_code()))\n",
- " \n",
+ " credit_card = \" \".join(\n",
+ " (\n",
+ " fake.credit_card_number(),\n",
+ " fake.credit_card_expire(),\n",
+ " fake.credit_card_security_code(),\n",
+ " )\n",
+ " )\n",
+ "\n",
" for j in range(random.randint(1, max_nights)):\n",
- " date = fake.date_between_dates(start_date, start_date+datetime.timedelta(days=45))\n",
+ " date = fake.date_between_dates(\n",
+ " start_date, start_date + datetime.timedelta(days=45)\n",
+ " )\n",
" room = random.randint(0, 80)\n",
" try:\n",
" reserve_room(room, date, guest, credit_card)\n",
" except HotelException as e:\n",
" print(repr(e))\n",
- " \n",
+ "\n",
"# show successful reservations\n",
"Reservation()"
]
@@ -1626,12 +1648,12 @@
],
"source": [
"# checkin a bunch of people\n",
- "checkins = random.sample(Reservation().fetch('KEY'), k=int(0.9*len(Reservation())))\n",
+ "checkins = random.sample(Reservation().fetch(\"KEY\"), k=int(0.9 * len(Reservation())))\n",
"for r in tqdm.tqdm(checkins):\n",
- " try: \n",
+ " try:\n",
" check_in(**r)\n",
" except AlreadyChecked as e:\n",
- " print(repr(e))\n"
+ " print(repr(e))"
]
},
{
@@ -1816,7 +1838,7 @@
],
"source": [
"# checkout a bunch of people\n",
- "checkouts = random.sample(CheckIn().fetch('KEY'), k=int(0.9*len(CheckIn())))\n",
+ "checkouts = random.sample(CheckIn().fetch(\"KEY\"), k=int(0.9 * len(CheckIn())))\n",
"for r in tqdm.tqdm(checkouts):\n",
" try:\n",
" check_out(**r)\n",
@@ -1968,7 +1990,7 @@
}
],
"source": [
- "Guest & (Reservation & (CheckIn & 'room=1'))"
+ "Guest & (Reservation & (CheckIn & \"room=1\"))"
]
},
{
@@ -2080,7 +2102,7 @@
}
],
"source": [
- "Guest & (Reservation * CheckIn & 'room=1')"
+ "Guest & (Reservation * CheckIn & \"room=1\")"
]
},
{
@@ -2109,7 +2131,7 @@
"outputs": [],
"source": [
"# pick a guest\n",
- "guest = random.choice(Guest().fetch('KEY'))"
+ "guest = random.choice(Guest().fetch(\"KEY\"))"
]
},
{
diff --git a/db-course/004-DatabaseNations.ipynb b/db-course/004-DatabaseNations.ipynb
index 8f25a8b..b286da2 100644
--- a/db-course/004-DatabaseNations.ipynb
+++ b/db-course/004-DatabaseNations.ipynb
@@ -23,6 +23,7 @@
],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"\n",
"%load_ext sql\n",
diff --git a/db-course/004-DatabaseSales.ipynb b/db-course/004-DatabaseSales.ipynb
index f4f465e..531807d 100644
--- a/db-course/004-DatabaseSales.ipynb
+++ b/db-course/004-DatabaseSales.ipynb
@@ -7,6 +7,7 @@
"outputs": [],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"\n",
"%load_ext sql\n",
@@ -16,7 +17,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
"metadata": {},
"outputs": [
{
@@ -279,7 +280,7 @@
"(144,'Volvo Model Replicas, Co','Berglund','Christina ','0921-12 3555','Berguvsvägen 8',NULL,'Luleå',NULL,'S-958 22','Sweden',1504,'53100.00'),\n",
"(145,'Danish Wholesale Imports','Petersen','Jytte ','31 12 3555','Vinbæltet 34',NULL,'Kobenhavn',NULL,'1734','Denmark',1401,'83400.00'),\n",
"(146,'Saveley & Henriot, Co.','Saveley','Mary ','78.32.5555','2, rue du Commerce',NULL,'Lyon',NULL,'69004','France',1337,'123900.00'),\n",
- "(148,'Dragon Souveniers, Ltd.','Natividad','Eric','+65 221 7555','Bronz Sok.','Bronz Apt. 3/6 Tesvikiye','Singapore',NULL,'079903','Singapore',1621,'103800.00'),\n",
+ "(148,'Dragon Souvenirs, Ltd.','Natividad','Eric','+65 221 7555','Bronz Sok.','Bronz Apt. 3/6 Tesvikiye','Singapore',NULL,'079903','Singapore',1621,'103800.00'),\n",
"(151,'Muscle Machine Inc','Young','Jeff','2125557413','4092 Furth Circle','Suite 400','NYC','NY','10022','USA',1286,'138500.00'),\n",
"(157,'Diecast Classics Inc.','Leong','Kelvin','2155551555','7586 Pompton St.',NULL,'Allentown','PA','70267','USA',1216,'100600.00'),\n",
"(161,'Technics Stores Inc.','Hashimoto','Juri','6505556809','9408 Furth Circle',NULL,'Burlingame','CA','94217','USA',1165,'84600.00'),\n",
@@ -291,7 +292,7 @@
"(172,'La Corne D\\'abondance, Co.','Bertrand','Marie','(1) 42.34.2555','265, boulevard Charonne',NULL,'Paris',NULL,'75012','France',1337,'84300.00'),\n",
"(173,'Cambridge Collectables Co.','Tseng','Jerry','6175555555','4658 Baden Av.',NULL,'Cambridge','MA','51247','USA',1188,'43400.00'),\n",
"(175,'Gift Depot Inc.','King','Julie','2035552570','25593 South Bay Ln.',NULL,'Bridgewater','CT','97562','USA',1323,'84300.00'),\n",
- "(177,'Osaka Souveniers Co.','Kentary','Mory','+81 06 6342 5555','1-6-20 Dojima',NULL,'Kita-ku','Osaka',' 530-0003','Japan',1621,'81200.00'),\n",
+ "(177,'Osaka Souvenirs Co.','Kentary','Mory','+81 06 6342 5555','1-6-20 Dojima',NULL,'Kita-ku','Osaka',' 530-0003','Japan',1621,'81200.00'),\n",
"(181,'Vitachrome Inc.','Frick','Michael','2125551500','2678 Kingston Rd.','Suite 101','NYC','NY','10022','USA',1286,'76400.00'),\n",
"(186,'Toys of Finland, Co.','Karttunen','Matti','90-224 8555','Keskuskatu 45',NULL,'Helsinki',NULL,'21240','Finland',1501,'96500.00'),\n",
"(187,'AV Stores, Co.','Ashworth','Rachel','(171) 555-1555','Fauntleroy Circus',NULL,'Manchester',NULL,'EC2 5NT','UK',1501,'136800.00'),\n",
@@ -334,7 +335,7 @@
"(319,'Mini Classics','Frick','Steve','9145554562','3758 North Pendale Street',NULL,'White Plains','NY','24067','USA',1323,'102700.00'),\n",
"(320,'Mini Creations Ltd.','Huang','Wing','5085559555','4575 Hillside Dr.',NULL,'New Bedford','MA','50553','USA',1188,'94500.00'),\n",
"(321,'Corporate Gift Ideas Co.','Brown','Julie','6505551386','7734 Strong St.',NULL,'San Francisco','CA','94217','USA',1165,'105000.00'),\n",
- "(323,'Down Under Souveniers, Inc','Graham','Mike','+64 9 312 5555','162-164 Grafton Road','Level 2','Auckland ',NULL,NULL,'New Zealand',1612,'88000.00'),\n",
+ "(323,'Down Under Souvenirs, Inc','Graham','Mike','+64 9 312 5555','162-164 Grafton Road','Level 2','Auckland ',NULL,NULL,'New Zealand',1612,'88000.00'),\n",
"(324,'Stylish Desk Decors, Co.','Brown','Ann ','(171) 555-0297','35 King George',NULL,'London',NULL,'WX3 6FW','UK',1501,'77000.00'),\n",
"(328,'Tekni Collectables Inc.','Brown','William','2015559350','7476 Moss Rd.',NULL,'Newark','NJ','94019','USA',1323,'43000.00'),\n",
"(333,'Australian Gift Network, Co','Calaghan','Ben','61-7-3844-6555','31 Duncan St. West End',NULL,'South Brisbane','Queensland','4101','Australia',1611,'51600.00'),\n",
@@ -351,7 +352,7 @@
"(361,'Kommission Auto','Josephs','Karin','0251-555259','Luisenstr. 48',NULL,'Münster',NULL,'44087','Germany',NULL,'0.00'),\n",
"(362,'Gifts4AllAges.com','Yoshido','Juri','6175559555','8616 Spinnaker Dr.',NULL,'Boston','MA','51003','USA',1216,'41900.00'),\n",
"(363,'Online Diecast Creations Co.','Young','Dorothy','6035558647','2304 Long Airport Avenue',NULL,'Nashua','NH','62005','USA',1216,'114200.00'),\n",
- "(369,'Lisboa Souveniers, Inc','Rodriguez','Lino ','(1) 354-2555','Jardim das rosas n. 32',NULL,'Lisboa',NULL,'1675','Portugal',NULL,'0.00'),\n",
+ "(369,'Lisboa Souvenirs, Inc','Rodriguez','Lino ','(1) 354-2555','Jardim das rosas n. 32',NULL,'Lisboa',NULL,'1675','Portugal',NULL,'0.00'),\n",
"(376,'Precious Collectables','Urs','Braun','0452-076555','Hauptstr. 29',NULL,'Bern',NULL,'3012','Switzerland',1702,'0.00'),\n",
"(379,'Collectables For Less Inc.','Nelson','Allen','6175558555','7825 Douglas Av.',NULL,'Brickhaven','MA','58339','USA',1188,'70700.00'),\n",
"(381,'Royale Belge','Cartrain','Pascale ','(071) 23 67 2555','Boulevard Tirou, 255',NULL,'Charleroi',NULL,'B-6000','Belgium',1401,'23500.00'),\n",
@@ -438,7 +439,7 @@
"\n",
"('S12_3891','1969 Ford Falcon','Classic Cars','1:12','Second Gear Diecast','Turnable front wheels; steering function; detailed interior; detailed engine; opening hood; opening trunk; opening doors; and detailed chassis.',1049,'83.05','173.02'),\n",
"\n",
- "('S12_3990','1970 Plymouth Hemi Cuda','Classic Cars','1:12','Studio M Art Models','Very detailed 1970 Plymouth Cuda model in 1:12 scale. The Cuda is generally accepted as one of the fastest original muscle cars from the 1970s. This model is a reproduction of one of the orginal 652 cars built in 1970. Red color.',5663,'31.92','79.80'),\n",
+ "('S12_3990','1970 Plymouth Hemi Cuda','Classic Cars','1:12','Studio M Art Models','Very detailed 1970 Plymouth Cuda model in 1:12 scale. The Cuda is generally accepted as one of the fastest original muscle cars from the 1970s. This model is a reproduction of one of the original 652 cars built in 1970. Red color.',5663,'31.92','79.80'),\n",
"\n",
"('S12_4473','1957 Chevy Pickup','Trucks and Buses','1:12','Exoto Designs','1:12 scale die-cast about 20\\\" long Hood opens, Rubber wheels',6125,'55.70','118.50'),\n",
"\n",
@@ -516,7 +517,7 @@
"\n",
"('S18_4600','1940s Ford truck','Trucks and Buses','1:18','Motor City Art Classics','This 1940s Ford Pick-Up truck is re-created in 1:18 scale of original 1940s Ford truck. This antique style metal 1940s Ford Flatbed truck is all hand-assembled. This collectible 1940\\'s Pick-Up truck is painted in classic dark green color, and features rotating wheels.',3128,'84.76','121.08'),\n",
"\n",
- "('S18_4668','1939 Cadillac Limousine','Vintage Cars','1:18','Studio M Art Models','Features completely detailed interior including Velvet flocked drapes,deluxe wood grain floor, and a wood grain casket with seperate chrome handles',6645,'23.14','50.31'),\n",
+ "('S18_4668','1939 Cadillac Limousine','Vintage Cars','1:18','Studio M Art Models','Features completely detailed interior including Velvet flocked drapes,deluxe wood grain floor, and a wood grain casket with separate chrome handles',6645,'23.14','50.31'),\n",
"\n",
"('S18_4721','1957 Corvette Convertible','Classic Cars','1:18','Classic Metal Creations','1957 die cast Corvette Convertible in Roman Red with white sides and whitewall tires. 1:18 scale quality die-cast with detailed engine and underbvody. Now you can own The Classic Corvette.',1249,'69.93','148.80'),\n",
"\n",
@@ -528,7 +529,7 @@
"\n",
"('S24_1578','1997 BMW R 1100 S','Motorcycles','1:24','Autoart Studio Design','Detailed scale replica with working suspension and constructed from over 70 parts',7003,'60.86','112.70'),\n",
"\n",
- "('S24_1628','1966 Shelby Cobra 427 S/C','Classic Cars','1:24','Carousel DieCast Legends','This diecast model of the 1966 Shelby Cobra 427 S/C includes many authentic details and operating parts. The 1:24 scale model of this iconic lighweight sports car from the 1960s comes in silver and it\\'s own display case.',8197,'29.18','50.31'),\n",
+ "('S24_1628','1966 Shelby Cobra 427 S/C','Classic Cars','1:24','Carousel DieCast Legends','This diecast model of the 1966 Shelby Cobra 427 S/C includes many authentic details and operating parts. The 1:24 scale model of this iconic lightweight sports car from the 1960s comes in silver and it\\'s own display case.',8197,'29.18','50.31'),\n",
"\n",
"('S24_1785','1928 British Royal Navy Airplane','Planes','1:24','Classic Metal Creations','Official logos and insignias',3627,'66.74','109.42'),\n",
"\n",
diff --git a/db-course/004-DatabaseUniversity.ipynb b/db-course/004-DatabaseUniversity.ipynb
index 863d7a4..a1b93e7 100644
--- a/db-course/004-DatabaseUniversity.ipynb
+++ b/db-course/004-DatabaseUniversity.ipynb
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Define"
+ "# Define\n"
]
},
{
@@ -27,7 +27,8 @@
],
"source": [
"import datajoint as dj\n",
- "schema = dj.schema('university')"
+ "\n",
+ "schema = dj.schema(\"university\")"
]
},
{
@@ -170,7 +171,8 @@
" course_name : varchar(200) # e.g. \"Neurobiology of Sensation and Movement.\"\n",
" credits : decimal(3,1) # number of credits earned by completing the course\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Term(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -178,6 +180,7 @@
" term : enum('Spring', 'Summer', 'Fall')\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Section(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -187,13 +190,15 @@
" ---\n",
" auditorium : varchar(12)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class CurrentTerm(dj.Manual):\n",
" definition = \"\"\"\n",
" -> Term\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Enroll(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -201,6 +206,7 @@
" -> Section\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class LetterGrade(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -209,6 +215,7 @@
" points : decimal(3,2)\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Grade(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -397,6 +404,7 @@
"import random\n",
"import itertools\n",
"import datetime\n",
+ "\n",
"fake = faker.Faker()"
]
},
@@ -407,14 +415,14 @@
"outputs": [],
"source": [
"def yield_students():\n",
- " fake_name = {'F': fake.name_female, 'M': fake.name_male}\n",
+ " fake_name = {\"F\": fake.name_female, \"M\": fake.name_male}\n",
" while True: # ignore invalid values\n",
" try:\n",
- " sex = random.choice(('F', 'M'))\n",
- " first_name, last_name = fake_name[sex]().split(' ')[:2]\n",
- " street_address, city = fake.address().split('\\n')\n",
- " city, state = city.split(', ')\n",
- " state, zipcode = state.split(' ') \n",
+ " sex = random.choice((\"F\", \"M\"))\n",
+ " first_name, last_name = fake_name[sex]().split(\" \")[:2]\n",
+ " street_address, city = fake.address().split(\"\\n\")\n",
+ " city, state = city.split(\", \")\n",
+ " state, zipcode = state.split(\" \")\n",
" except ValueError:\n",
" continue\n",
" else:\n",
@@ -427,8 +435,10 @@
" home_state=state,\n",
" home_zip=zipcode,\n",
" date_of_birth=str(\n",
- " fake.date_time_between(start_date=\"-35y\", end_date=\"-15y\").date()),\n",
- " home_phone = fake.phone_number()[:20])"
+ " fake.date_time_between(start_date=\"-35y\", end_date=\"-15y\").date()\n",
+ " ),\n",
+ " home_phone=fake.phone_number()[:20],\n",
+ " )"
]
},
{
@@ -437,8 +447,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Student.insert(\n",
- " dict(k, student_id=i) for i, k in zip(range(100,300), yield_students()))"
+ "Student.insert(dict(k, student_id=i) for i, k in zip(range(100, 300), yield_students()))"
]
},
{
@@ -684,15 +693,19 @@
"outputs": [],
"source": [
"Department.insert(\n",
- " dict(dept=dept, \n",
- " dept_name=name, \n",
- " dept_address=fake.address(), \n",
- " dept_phone=fake.phone_number()[:20])\n",
+ " dict(\n",
+ " dept=dept,\n",
+ " dept_name=name,\n",
+ " dept_address=fake.address(),\n",
+ " dept_phone=fake.phone_number()[:20],\n",
+ " )\n",
" for dept, name in [\n",
" [\"CS\", \"Computer Science\"],\n",
" [\"BIOL\", \"Life Sciences\"],\n",
" [\"PHYS\", \"Physics\"],\n",
- " [\"MATH\", \"Mathematics\"]])"
+ " [\"MATH\", \"Mathematics\"],\n",
+ " ]\n",
+ ")"
]
},
{
@@ -701,10 +714,13 @@
"metadata": {},
"outputs": [],
"source": [
- "StudentMajor.insert({**s, **d, \n",
- " 'declare_date':fake.date_between(start_date=datetime.date(1999,1,1))}\n",
- " for s, d in zip(Student.fetch('KEY'), random.choices(Department.fetch('KEY'), k=len(Student())))\n",
- " if random.random() < 0.75)"
+ "StudentMajor.insert(\n",
+ " {**s, **d, \"declare_date\": fake.date_between(start_date=datetime.date(1999, 1, 1))}\n",
+ " for s, d in zip(\n",
+ " Student.fetch(\"KEY\"), random.choices(Department.fetch(\"KEY\"), k=len(Student()))\n",
+ " )\n",
+ " if random.random() < 0.75\n",
+ ")"
]
},
{
@@ -845,57 +861,55 @@
"outputs": [],
"source": [
"# from https://www.utah.edu/\n",
- "Course.insert([\n",
- " ['BIOL', 1006, 'World of Dinosaurs', 3],\n",
- " ['BIOL', 1010, 'Biology in the 21st Century', 3],\n",
- " ['BIOL', 1030, 'Human Biology', 3],\n",
- " ['BIOL', 1210, 'Principles of Biology', 4],\n",
- " ['BIOL', 2010, 'Evolution & Diversity of Life', 3],\n",
- " ['BIOL', 2020, 'Principles of Cell Biology', 3],\n",
- " ['BIOL', 2021, 'Principles of Cell Science', 4],\n",
- " ['BIOL', 2030, 'Principles of Genetics', 3],\n",
- " ['BIOL', 2210, 'Human Genetics',3],\n",
- " ['BIOL', 2325, 'Human Anatomy', 4],\n",
- " ['BIOL', 2330, 'Plants & Society', 3],\n",
- " ['BIOL', 2355, 'Field Botany', 2],\n",
- " ['BIOL', 2420, 'Human Physiology', 4],\n",
- "\n",
- " ['PHYS', 2040, 'Classcal Theoretical Physics II', 4],\n",
- " ['PHYS', 2060, 'Quantum Mechanics', 3],\n",
- " ['PHYS', 2100, 'General Relativity and Cosmology', 3],\n",
- " ['PHYS', 2140, 'Statistical Mechanics', 4],\n",
- " \n",
- " ['PHYS', 2210, 'Physics for Scientists and Engineers I', 4], \n",
- " ['PHYS', 2220, 'Physics for Scientists and Engineers II', 4],\n",
- " ['PHYS', 3210, 'Physics for Scientists I (Honors)', 4],\n",
- " ['PHYS', 3220, 'Physics for Scientists II (Honors)', 4],\n",
- " \n",
- " ['MATH', 1250, 'Calculus for AP Students I', 4],\n",
- " ['MATH', 1260, 'Calculus for AP Students II', 4],\n",
- " ['MATH', 1210, 'Calculus I', 4],\n",
- " ['MATH', 1220, 'Calculus II', 4],\n",
- " ['MATH', 2210, 'Calculus III', 3],\n",
- " \n",
- " ['MATH', 2270, 'Linear Algebra', 4],\n",
- " ['MATH', 2280, 'Introduction to Differential Equations', 4],\n",
- " ['MATH', 3210, 'Foundations of Analysis I', 4],\n",
- " ['MATH', 3220, 'Foundations of Analysis II', 4],\n",
- " \n",
- " ['CS', 1030, 'Foundations of Computer Science', 3],\n",
- " ['CS', 1410, 'Introduction to Object-Oriented Programming', 4],\n",
- " ['CS', 2420, 'Introduction to Algorithms & Data Structures', 4],\n",
- " ['CS', 2100, 'Discrete Structures', 3],\n",
- " ['CS', 3500, 'Software Practice', 4],\n",
- " ['CS', 3505, 'Software Practice II', 3],\n",
- " ['CS', 3810, 'Computer Organization', 4],\n",
- " ['CS', 4400, 'Computer Systems', 4],\n",
- " ['CS', 4150, 'Algorithms', 3],\n",
- " ['CS', 3100, 'Models of Computation', 3],\n",
- " ['CS', 3200, 'Introduction to Scientific Computing', 3],\n",
- " ['CS', 4000, 'Senior Capstone Project - Design Phase', 3],\n",
- " ['CS', 4500, 'Senior Capstone Project', 3],\n",
- " ['CS', 4940, 'Undergraduate Research', 3],\n",
- " ['CS', 4970, 'Computer Science Bachelor''s Thesis', 3]])"
+ "Course.insert(\n",
+ " [\n",
+ " [\"BIOL\", 1006, \"World of Dinosaurs\", 3],\n",
+ " [\"BIOL\", 1010, \"Biology in the 21st Century\", 3],\n",
+ " [\"BIOL\", 1030, \"Human Biology\", 3],\n",
+ " [\"BIOL\", 1210, \"Principles of Biology\", 4],\n",
+ " [\"BIOL\", 2010, \"Evolution & Diversity of Life\", 3],\n",
+ " [\"BIOL\", 2020, \"Principles of Cell Biology\", 3],\n",
+ " [\"BIOL\", 2021, \"Principles of Cell Science\", 4],\n",
+ " [\"BIOL\", 2030, \"Principles of Genetics\", 3],\n",
+ " [\"BIOL\", 2210, \"Human Genetics\", 3],\n",
+ " [\"BIOL\", 2325, \"Human Anatomy\", 4],\n",
+ " [\"BIOL\", 2330, \"Plants & Society\", 3],\n",
+ " [\"BIOL\", 2355, \"Field Botany\", 2],\n",
+ " [\"BIOL\", 2420, \"Human Physiology\", 4],\n",
+ " [\"PHYS\", 2040, \"Classcal Theoretical Physics II\", 4],\n",
+ " [\"PHYS\", 2060, \"Quantum Mechanics\", 3],\n",
+ " [\"PHYS\", 2100, \"General Relativity and Cosmology\", 3],\n",
+ " [\"PHYS\", 2140, \"Statistical Mechanics\", 4],\n",
+ " [\"PHYS\", 2210, \"Physics for Scientists and Engineers I\", 4],\n",
+ " [\"PHYS\", 2220, \"Physics for Scientists and Engineers II\", 4],\n",
+ " [\"PHYS\", 3210, \"Physics for Scientists I (Honors)\", 4],\n",
+ " [\"PHYS\", 3220, \"Physics for Scientists II (Honors)\", 4],\n",
+ " [\"MATH\", 1250, \"Calculus for AP Students I\", 4],\n",
+ " [\"MATH\", 1260, \"Calculus for AP Students II\", 4],\n",
+ " [\"MATH\", 1210, \"Calculus I\", 4],\n",
+ " [\"MATH\", 1220, \"Calculus II\", 4],\n",
+ " [\"MATH\", 2210, \"Calculus III\", 3],\n",
+ " [\"MATH\", 2270, \"Linear Algebra\", 4],\n",
+ " [\"MATH\", 2280, \"Introduction to Differential Equations\", 4],\n",
+ " [\"MATH\", 3210, \"Foundations of Analysis I\", 4],\n",
+ " [\"MATH\", 3220, \"Foundations of Analysis II\", 4],\n",
+ " [\"CS\", 1030, \"Foundations of Computer Science\", 3],\n",
+ " [\"CS\", 1410, \"Introduction to Object-Oriented Programming\", 4],\n",
+ " [\"CS\", 2420, \"Introduction to Algorithms & Data Structures\", 4],\n",
+ " [\"CS\", 2100, \"Discrete Structures\", 3],\n",
+ " [\"CS\", 3500, \"Software Practice\", 4],\n",
+ " [\"CS\", 3505, \"Software Practice II\", 3],\n",
+ " [\"CS\", 3810, \"Computer Organization\", 4],\n",
+ " [\"CS\", 4400, \"Computer Systems\", 4],\n",
+ " [\"CS\", 4150, \"Algorithms\", 3],\n",
+ " [\"CS\", 3100, \"Models of Computation\", 3],\n",
+ " [\"CS\", 3200, \"Introduction to Scientific Computing\", 3],\n",
+ " [\"CS\", 4000, \"Senior Capstone Project - Design Phase\", 3],\n",
+ " [\"CS\", 4500, \"Senior Capstone Project\", 3],\n",
+ " [\"CS\", 4940, \"Undergraduate Research\", 3],\n",
+ " [\"CS\", 4970, \"Computer Science Bachelor\" \"s Thesis\", 3],\n",
+ " ]\n",
+ ")"
]
},
{
@@ -904,23 +918,30 @@
"metadata": {},
"outputs": [],
"source": [
- "Term.insert(dict(term_year=year, term=term) \n",
- " for year in range(1999, 2019) \n",
- " for term in ['Spring', 'Summer', 'Fall'])\n",
+ "Term.insert(\n",
+ " dict(term_year=year, term=term)\n",
+ " for year in range(1999, 2019)\n",
+ " for term in [\"Spring\", \"Summer\", \"Fall\"]\n",
+ ")\n",
+ "\n",
+ "Term().fetch(order_by=(\"term_year DESC\", \"term DESC\"), as_dict=True, limit=1)[0]\n",
"\n",
- "Term().fetch(order_by=('term_year DESC', 'term DESC'), as_dict=True, limit=1)[0]\n",
+ "CurrentTerm().insert1(\n",
+ " {**Term().fetch(order_by=(\"term_year DESC\", \"term DESC\"), as_dict=True, limit=1)[0]}\n",
+ ")\n",
"\n",
- "CurrentTerm().insert1({\n",
- " **Term().fetch(order_by=('term_year DESC', 'term DESC'), as_dict=True, limit=1)[0]})\n",
"\n",
"def make_section(prob):\n",
" for c in (Course * Term).proj():\n",
- " for sec in 'abcd':\n",
+ " for sec in \"abcd\":\n",
" if random.random() < prob:\n",
" break\n",
" yield {\n",
- " **c, 'section': sec, \n",
- " 'auditorium': random.choice('ABCDEF') + str(random.randint(1,100))} \n",
+ " **c,\n",
+ " \"section\": sec,\n",
+ " \"auditorium\": random.choice(\"ABCDEF\") + str(random.randint(1, 100)),\n",
+ " }\n",
+ "\n",
"\n",
"Section.insert(make_section(0.5))"
]
@@ -931,18 +952,21 @@
"metadata": {},
"outputs": [],
"source": [
- "LetterGrade.insert([\n",
- " ['A', 4.00],\n",
- " ['A-', 3.67],\n",
- " ['B+', 3.33],\n",
- " ['B', 3.00],\n",
- " ['B-', 2.67],\n",
- " ['C+', 2.33],\n",
- " ['C', 2.00],\n",
- " ['C-', 1.67],\n",
- " ['D+', 1.33],\n",
- " ['D', 1.00],\n",
- " ['F', 0.00]])"
+ "LetterGrade.insert(\n",
+ " [\n",
+ " [\"A\", 4.00],\n",
+ " [\"A-\", 3.67],\n",
+ " [\"B+\", 3.33],\n",
+ " [\"B\", 3.00],\n",
+ " [\"B-\", 2.67],\n",
+ " [\"C+\", 2.33],\n",
+ " [\"C\", 2.00],\n",
+ " [\"C-\", 1.67],\n",
+ " [\"D+\", 1.33],\n",
+ " [\"D\", 1.00],\n",
+ " [\"F\", 0.00],\n",
+ " ]\n",
+ ")"
]
},
{
@@ -1072,19 +1096,23 @@
}
],
"source": [
- "# Enrollment \n",
- "terms = Term().fetch('KEY')\n",
+ "# Enrollment\n",
+ "terms = Term().fetch(\"KEY\")\n",
"quit_prob = 0.1\n",
- "for student in tqdm(Student.fetch('KEY')):\n",
+ "for student in tqdm(Student.fetch(\"KEY\")):\n",
" start_term = random.randrange(len(terms))\n",
" for term in terms[start_term:]:\n",
" if random.random() < quit_prob:\n",
" break\n",
" else:\n",
- " sections = ((Section & term) - (Course & (Enroll & student))).fetch('KEY')\n",
+ " sections = ((Section & term) - (Course & (Enroll & student))).fetch(\"KEY\")\n",
" if sections:\n",
- " Enroll.insert({**student, **section} for section in \n",
- " random.sample(sections, random.randrange(min(5, len(sections)))))"
+ " Enroll.insert(\n",
+ " {**student, **section}\n",
+ " for section in random.sample(\n",
+ " sections, random.randrange(min(5, len(sections)))\n",
+ " )\n",
+ " )"
]
},
{
@@ -1094,28 +1122,30 @@
"outputs": [],
"source": [
"# assign random grades\n",
- "grades = LetterGrade.fetch('grade')\n",
+ "grades = LetterGrade.fetch(\"grade\")\n",
"\n",
- "grade_keys = Enroll.fetch('KEY')\n",
+ "grade_keys = Enroll.fetch(\"KEY\")\n",
"random.shuffle(grade_keys)\n",
- "grade_keys = grade_keys[:len(grade_keys)*9//10]\n",
+ "grade_keys = grade_keys[: len(grade_keys) * 9 // 10]\n",
"\n",
- "Grade.insert({**key, 'grade':grade} \n",
- " for key, grade in zip(grade_keys, random.choices(grades, k=len(grade_keys))))"
+ "Grade.insert(\n",
+ " {**key, \"grade\": grade}\n",
+ " for key, grade in zip(grade_keys, random.choices(grades, k=len(grade_keys)))\n",
+ ")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Queries"
+ "# Queries\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Restriction"
+ "## Restriction\n"
]
},
{
@@ -1271,7 +1301,7 @@
],
"source": [
"# Students from Texas\n",
- "Student & {'home_state': 'TX'}"
+ "Student & {\"home_state\": \"TX\"}"
]
},
{
@@ -1663,7 +1693,7 @@
],
"source": [
"# Male students from outside Texas\n",
- "(Student & 'sex=\"M\"') - {'home_state': 'TX'}"
+ "(Student & 'sex=\"M\"') - {\"home_state\": \"TX\"}"
]
},
{
@@ -1889,7 +1919,7 @@
],
"source": [
"# Students from TX, OK, or NM\n",
- "Student & [{'home_state':'OK'}, {'home_state':'NM'}, {'home_state':'TX'}] "
+ "Student & [{\"home_state\": \"OK\"}, {\"home_state\": \"NM\"}, {\"home_state\": \"TX\"}]"
]
},
{
@@ -1898,7 +1928,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# Millenials\n",
+ "# Millennials\n",
"millennials = Student & 'date_of_birth between \"1981-01-01\" and \"1996-12-31\"'"
]
},
@@ -2577,7 +2607,7 @@
}
],
"source": [
- "# Millenials who have never enrolled\n",
+ "# Millennials who have never enrolled\n",
"millennials - Enroll"
]
},
@@ -3575,7 +3605,7 @@
"metadata": {},
"outputs": [],
"source": [
- "#Students who have taken classes or have chosen a major\n",
+ "# Students who have taken classes or have chosen a major\n",
"Student & [Enroll, StudentMajor]"
]
},
@@ -3593,7 +3623,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Join"
+ "## Join\n"
]
},
{
@@ -3623,7 +3653,7 @@
"outputs": [],
"source": [
"# Enrollment with major information\n",
- "Enroll * StudentMajor.proj(major='dept')"
+ "Enroll * StudentMajor.proj(major=\"dept\")"
]
},
{
@@ -3633,7 +3663,7 @@
"outputs": [],
"source": [
"# Enrollment outside chosen major\n",
- "Enroll * StudentMajor.proj(major='dept') & 'major<>dept'"
+ "Enroll * StudentMajor.proj(major=\"dept\") & \"major<>dept\""
]
},
{
@@ -3642,8 +3672,8 @@
"metadata": {},
"outputs": [],
"source": [
- "# Enrollment not matching major \n",
- "Enroll - StudentMajor "
+ "# Enrollment not matching major\n",
+ "Enroll - StudentMajor"
]
},
{
@@ -3653,14 +3683,14 @@
"outputs": [],
"source": [
"# Total grade points\n",
- "(Course * Grade * LetterGrade).proj(total='points*credits')"
+ "(Course * Grade * LetterGrade).proj(total=\"points*credits\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Aggr"
+ "## Aggr\n"
]
},
{
@@ -3670,7 +3700,7 @@
"outputs": [],
"source": [
"# Students in each section\n",
- "Section.aggr(Enroll, n='count(*)')"
+ "Section.aggr(Enroll, n=\"count(*)\")"
]
},
{
@@ -3680,7 +3710,7 @@
"outputs": [],
"source": [
"# Average grade in each course\n",
- "Course.aggr(Grade*LetterGrade, avg_grade='avg(points)')"
+ "Course.aggr(Grade * LetterGrade, avg_grade=\"avg(points)\")"
]
},
{
@@ -3690,8 +3720,9 @@
"outputs": [],
"source": [
"# Fraction graded in each section\n",
- "(Section.aggr(Enroll,n='count(*)') * Section.aggr(Grade, m='count(*)')).proj(\n",
- " 'm','n',frac='m/n')"
+ "(Section.aggr(Enroll, n=\"count(*)\") * Section.aggr(Grade, m=\"count(*)\")).proj(\n",
+ " \"m\", \"n\", frac=\"m/n\"\n",
+ ")"
]
},
{
@@ -3701,8 +3732,7 @@
"outputs": [],
"source": [
"# Student GPA\n",
- "Student.aggr(Course * Grade * LetterGrade, \n",
- " gpa='sum(points*credits)/sum(credits)')"
+ "Student.aggr(Course * Grade * LetterGrade, gpa=\"sum(points*credits)/sum(credits)\")"
]
},
{
@@ -3712,9 +3742,8 @@
"outputs": [],
"source": [
"# Average GPA for each major\n",
- "gpa = Student.aggr(Course * Grade * LetterGrade,\n",
- " gpa='sum(points*credits)/sum(credits)')\n",
- "Department.aggr(StudentMajor*gpa, avg_gpa='avg(gpa)')"
+ "gpa = Student.aggr(Course * Grade * LetterGrade, gpa=\"sum(points*credits)/sum(credits)\")\n",
+ "Department.aggr(StudentMajor * gpa, avg_gpa=\"avg(gpa)\")"
]
}
],
diff --git a/db-course/004-Design-HW.ipynb b/db-course/004-Design-HW.ipynb
index 4dbc644..cb8a4b6 100644
--- a/db-course/004-Design-HW.ipynb
+++ b/db-course/004-Design-HW.ipynb
@@ -47,6 +47,7 @@
" email : varchar(100) NOT NULL\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Assignment(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -57,6 +58,7 @@
" due_date: date\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class GradedAssignment(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -96,7 +98,7 @@
" author: varchar(255)\n",
" \"\"\"\n",
"\n",
- " \n",
+ "\n",
"@schema\n",
"class Copy(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -104,6 +106,7 @@
" copy_id: int \n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Member(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -113,6 +116,7 @@
" address: varchar(255)\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Checkout(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -155,6 +159,7 @@
" \n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Customer(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -169,6 +174,7 @@
" -> Branch.proj(home_branch=\"branch_id\")\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Account(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -198,6 +204,7 @@
" bank_name: VARCHAR(30)\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class HomeBranch(dj.Manual):\n",
" definition = \"\"\" \n",
@@ -210,6 +217,7 @@
"\n",
"\"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Customers(dj.Manual):\n",
" definition = \"\"\" \n",
diff --git a/db-course/004-Design.ipynb b/db-course/004-Design.ipynb
index 0285ebe..02c6a4a 100644
--- a/db-course/004-Design.ipynb
+++ b/db-course/004-Design.ipynb
@@ -38,7 +38,7 @@
"source": [
"import datajoint as dj\n",
"\n",
- "schema = dj.Schema('app')"
+ "schema = dj.Schema(\"app\")"
]
},
{
@@ -56,7 +56,7 @@
" last_name : varchar(30)\n",
" dob=null : date\n",
" sex='' : enum('F', 'M', '')\n",
- " \"\"\"\n"
+ " \"\"\""
]
},
{
@@ -91,7 +91,11 @@
" addon_name : varchar(30)\n",
" price : decimal(5, 2) unsigned\n",
" \"\"\"\n",
- " contents = ((1, \"Track & Field\", 13.99), (2, \"Marathon\", 26.2), (3, \"Sprint\", 100.00))"
+ " contents = (\n",
+ " (1, \"Track & Field\", 13.99),\n",
+ " (2, \"Marathon\", 26.2),\n",
+ " (3, \"Sprint\", 100.00),\n",
+ " )"
]
},
{
@@ -413,6 +417,7 @@
"import random\n",
"from tqdm import tqdm\n",
"from faker import Faker\n",
+ "\n",
"fake = Faker()"
]
},
@@ -423,12 +428,15 @@
"outputs": [],
"source": [
"# insert one account\n",
- "Account.insert1(dict(\n",
- " phone=fake.random_int(1_000_000_0000, 9_999_999_9999),\n",
- " first_name=fake.first_name_male(),\n",
- " last_name=fake.last_name(),\n",
- " sex=\"M\",\n",
- " dob=fake.date_of_birth()))"
+ "Account.insert1(\n",
+ " dict(\n",
+ " phone=fake.random_int(1_000_000_0000, 9_999_999_9999),\n",
+ " first_name=fake.first_name_male(),\n",
+ " last_name=fake.last_name(),\n",
+ " sex=\"M\",\n",
+ " dob=fake.date_of_birth(),\n",
+ " )\n",
+ ")"
]
},
{
@@ -438,26 +446,38 @@
"outputs": [],
"source": [
"# insert many male accounts\n",
- "Account.insert(dict(\n",
- " phone=fake.random_int(1_000_000_0000, 9_999_999_9999),\n",
- " first_name=fake.first_name_male(),\n",
- " last_name=fake.last_name(),\n",
- " sex='M',\n",
- " dob=fake.date_of_birth()) for _ in range(5000))\n",
+ "Account.insert(\n",
+ " dict(\n",
+ " phone=fake.random_int(1_000_000_0000, 9_999_999_9999),\n",
+ " first_name=fake.first_name_male(),\n",
+ " last_name=fake.last_name(),\n",
+ " sex=\"M\",\n",
+ " dob=fake.date_of_birth(),\n",
+ " )\n",
+ " for _ in range(5000)\n",
+ ")\n",
"\n",
"# insert many female accounts\n",
- "Account.insert(dict(\n",
- " phone=fake.random_int(1_000_000_0000, 9_999_999_9999),\n",
- " first_name=fake.first_name_female(),\n",
- " last_name=fake.last_name(),\n",
- " sex='F',\n",
- " dob=fake.date_of_birth()) for _ in range(5000))\n",
+ "Account.insert(\n",
+ " dict(\n",
+ " phone=fake.random_int(1_000_000_0000, 9_999_999_9999),\n",
+ " first_name=fake.first_name_female(),\n",
+ " last_name=fake.last_name(),\n",
+ " sex=\"F\",\n",
+ " dob=fake.date_of_birth(),\n",
+ " )\n",
+ " for _ in range(5000)\n",
+ ")\n",
"\n",
"# insert some accounts with no sex and no birthdate\n",
- "Account.insert(dict(\n",
- " phone=fake.random_int(1_000_000_0000, 9_999_999_9999),\n",
- " first_name=fake.first_name(),\n",
- " last_name=fake.last_name()) for _ in range(500))"
+ "Account.insert(\n",
+ " dict(\n",
+ " phone=fake.random_int(1_000_000_0000, 9_999_999_9999),\n",
+ " first_name=fake.first_name(),\n",
+ " last_name=fake.last_name(),\n",
+ " )\n",
+ " for _ in range(500)\n",
+ ")"
]
},
{
@@ -639,11 +659,14 @@
"source": [
"# insert one credit card\n",
"CreditCard.insert1(\n",
- " dict(random.choice(keys), \n",
- " zipcode=random.randint(10000,99999), \n",
- " card_number=int(fake.credit_card_number()),\n",
- " cvv=random.randint(1, 999), \n",
- " exp_date=fake.future_date()))"
+ " dict(\n",
+ " random.choice(keys),\n",
+ " zipcode=random.randint(10000, 99999),\n",
+ " card_number=int(fake.credit_card_number()),\n",
+ " cvv=random.randint(1, 999),\n",
+ " exp_date=fake.future_date(),\n",
+ " )\n",
+ ")"
]
},
{
@@ -654,11 +677,15 @@
"source": [
"# insert many credit cards\n",
"CreditCard.insert(\n",
- " dict(random.choice(keys), \n",
- " zipcode=random.randint(10000,99999), \n",
- " card_number=int(fake.credit_card_number()),\n",
- " cvv=random.randint(1, 999), \n",
- " exp_date=fake.future_date()) for _ in range(15000))"
+ " dict(\n",
+ " random.choice(keys),\n",
+ " zipcode=random.randint(10000, 99999),\n",
+ " card_number=int(fake.credit_card_number()),\n",
+ " cvv=random.randint(1, 999),\n",
+ " exp_date=fake.future_date(),\n",
+ " )\n",
+ " for _ in range(15000)\n",
+ ")"
]
},
{
@@ -668,13 +695,18 @@
"outputs": [],
"source": [
"# get all possible valid purchases, eliminate duplicate purchases that are under different cards\n",
- "purchases = (Account * CreditCard * AddOn - Purchase.proj()).fetch(\"KEY\", order_by=('phone', 'addon_id'))\n",
+ "purchases = (Account * CreditCard * AddOn - Purchase.proj()).fetch(\n",
+ " \"KEY\", order_by=(\"phone\", \"addon_id\")\n",
+ ")\n",
"unique_purchases = [purchases.pop()]\n",
"for purchase in purchases:\n",
- " if (purchase['phone'], purchase['addon_id']) != (unique_purchases[-1]['phone'], unique_purchases[-1]['addon_id']):\n",
+ " if (purchase[\"phone\"], purchase[\"addon_id\"]) != (\n",
+ " unique_purchases[-1][\"phone\"],\n",
+ " unique_purchases[-1][\"addon_id\"],\n",
+ " ):\n",
" unique_purchases.append(dict(purchase, purchase_date=fake.past_date()))\n",
"\n",
- "# insert a random subset \n",
+ "# insert a random subset\n",
"Purchase.insert(random.sample(unique_purchases, 5000))"
]
},
@@ -684,7 +716,7 @@
"metadata": {},
"outputs": [],
"source": [
- "schema.drop() # optionally drop the schema to clear before the next run"
+ "schema.drop() # optionally drop the schema to clear before the next run"
]
},
{
diff --git a/db-course/004-DesignSQL.ipynb b/db-course/004-DesignSQL.ipynb
index 1596fcc..9556326 100644
--- a/db-course/004-DesignSQL.ipynb
+++ b/db-course/004-DesignSQL.ipynb
@@ -23,6 +23,7 @@
"outputs": [],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"\n",
"%load_ext sql\n",
diff --git a/db-course/005-Queries-HW.ipynb b/db-course/005-Queries-HW.ipynb
index 0d838bd..9bc842b 100644
--- a/db-course/005-Queries-HW.ipynb
+++ b/db-course/005-Queries-HW.ipynb
@@ -18,6 +18,7 @@
"outputs": [],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"\n",
"%load_ext sql\n",
@@ -107,7 +108,8 @@
],
"source": [
"import datajoint as dj\n",
- "schema = dj.Schema('app')\n",
+ "\n",
+ "schema = dj.Schema(\"app\")\n",
"schema.spawn_missing_classes()\n",
"dj.Diagram(schema)"
]
@@ -241,8 +243,7 @@
}
],
"source": [
- "\n",
- "Account & 'first_name in (\"Paul\", \"Paula\")' & 'year(dob) between 1990 and 1999'"
+ "Account & 'first_name in (\"Paul\", \"Paula\")' & \"year(dob) between 1990 and 1999\""
]
},
{
@@ -390,7 +391,12 @@
}
],
"source": [
- "Account & 'sex=\"M\"' & 'last_name LIKE \"Ra%\"' & 'dob BETWEEN \"2004-01-01\" AND \"2023-01-01\"'"
+ "(\n",
+ " Account\n",
+ " & 'sex=\"M\"'\n",
+ " & 'last_name LIKE \"Ra%\"'\n",
+ " & 'dob BETWEEN \"2004-01-01\" AND \"2023-01-01\"'\n",
+ ")"
]
},
{
@@ -420,7 +426,7 @@
}
],
"source": [
- "(Account & 'sex=\"M\"' & 'last_name LIKE \"Ra%\"').fetch(order_by='dob DESC', limit=10)"
+ "(Account & 'sex=\"M\"' & 'last_name LIKE \"Ra%\"').fetch(order_by=\"dob DESC\", limit=10)"
]
},
{
@@ -558,7 +564,9 @@
}
],
"source": [
- "(Account & 'sex=\"M\"' & 'last_name LIKE \"Ra%\"').fetch(order_by='dob DESC', limit=10, format=\"frame\")"
+ "(Account & 'sex=\"M\"' & 'last_name LIKE \"Ra%\"').fetch(\n",
+ " order_by=\"dob DESC\", limit=10, format=\"frame\"\n",
+ ")"
]
},
{
@@ -806,8 +814,9 @@
}
],
"source": [
- "(Account & 'dob IS NULL').proj(full_name=\"CONCAT(last_name, ', ', first_name)\").fetch(\n",
- " order_by= 'full_name', limit=10, format=\"frame\")"
+ "(Account & \"dob IS NULL\").proj(full_name=\"CONCAT(last_name, ', ', first_name)\").fetch(\n",
+ " order_by=\"full_name\", limit=10, format=\"frame\"\n",
+ ")"
]
},
{
@@ -1114,8 +1123,9 @@
}
],
"source": [
- "(Account & 'sex = \"F\"' & 'MONTH(dob) = 6').proj(full_name='CONCAT(last_name, \", \", first_name)',\n",
- "age='YEAR(CURDATE()) - YEAR(dob)').fetch(order_by='last_name, first_name', limit=10, format='frame')"
+ "(Account & 'sex = \"F\"' & \"MONTH(dob) = 6\").proj(\n",
+ " full_name='CONCAT(last_name, \", \", first_name)', age=\"YEAR(CURDATE()) - YEAR(dob)\"\n",
+ ").fetch(order_by=\"last_name, first_name\", limit=10, format=\"frame\")"
]
},
{
@@ -1297,8 +1307,12 @@
}
],
"source": [
- "(Account & CreditCard).aggr(Account.proj(excluded='sex'), dob='MIN(dob)', full_name='CONCAT(last_name, \", \", first_name)',\n",
- "age_in_day='DAY(CURDATE()) - DAY(dob)').fetch(order_by='dob DESC', limit=1, format='frame')"
+ "(Account & CreditCard).aggr(\n",
+ " Account.proj(excluded=\"sex\"),\n",
+ " dob=\"MIN(dob)\",\n",
+ " full_name='CONCAT(last_name, \", \", first_name)',\n",
+ " age_in_day=\"DAY(CURDATE()) - DAY(dob)\",\n",
+ ").fetch(order_by=\"dob DESC\", limit=1, format=\"frame\")"
]
},
{
@@ -1567,11 +1581,14 @@
}
],
"source": [
- "((Account * Purchase * AddOn) & ('addon_name = \"Sprint\"') & ('dob IS NOT NULL')\n",
- " ).aggr(Account.proj(excluded='sex'),\n",
- " 'last_name', 'first_name', 'purchase_date','addon_name', \n",
- " age='YEAR(CURDATE()) - YEAR(dob)'\n",
- " ).fetch(order_by='purchase_date', limit=10, format='frame')"
+ "((Account * Purchase * AddOn) & ('addon_name = \"Sprint\"') & (\"dob IS NOT NULL\")).aggr(\n",
+ " Account.proj(excluded=\"sex\"),\n",
+ " \"last_name\",\n",
+ " \"first_name\",\n",
+ " \"purchase_date\",\n",
+ " \"addon_name\",\n",
+ " age=\"YEAR(CURDATE()) - YEAR(dob)\",\n",
+ ").fetch(order_by=\"purchase_date\", limit=10, format=\"frame\")"
]
},
{
@@ -1673,7 +1690,7 @@
}
],
"source": [
- "Purchase() & \"purchase_date < '2023-09-01'\" "
+ "Purchase() & \"purchase_date < '2023-09-01'\""
]
},
{
@@ -1888,7 +1905,9 @@
}
],
"source": [
- "(Account * Purchase * AddOn & 'addon_name=\"Sprint\"').fetch(order_by='purchase_date', limit=10, format=\"frame\")"
+ "(Account * Purchase * AddOn & 'addon_name=\"Sprint\"').fetch(\n",
+ " order_by=\"purchase_date\", limit=10, format=\"frame\"\n",
+ ")"
]
},
{
diff --git a/db-course/005-Queries.ipynb b/db-course/005-Queries.ipynb
index 9226e7f..81ef6e4 100644
--- a/db-course/005-Queries.ipynb
+++ b/db-course/005-Queries.ipynb
@@ -100,7 +100,8 @@
],
"source": [
"import datajoint as dj\n",
- "schema = dj.Schema('app')\n",
+ "\n",
+ "schema = dj.Schema(\"app\")\n",
"schema.spawn_missing_classes()\n",
"dj.Diagram(schema)"
]
@@ -491,7 +492,7 @@
}
],
"source": [
- "Account.fetch(as_dict=True, order_by=('last_name', 'first_name'), limit=5)"
+ "Account.fetch(as_dict=True, order_by=(\"last_name\", \"first_name\"), limit=5)"
]
},
{
@@ -535,7 +536,7 @@
}
],
"source": [
- "Account.fetch(as_dict=True, order_by=('dob DESC'), limit=5, offset=100)"
+ "Account.fetch(as_dict=True, order_by=(\"dob DESC\"), limit=5, offset=100)"
]
},
{
@@ -644,7 +645,7 @@
}
],
"source": [
- "Account() & {'phone': 69235537483}"
+ "Account() & {\"phone\": 69235537483}"
]
},
{
@@ -805,7 +806,7 @@
}
],
"source": [
- "Account() & {'first_name': 'Michael'}"
+ "Account() & {\"first_name\": \"Michael\"}"
]
},
{
@@ -1252,7 +1253,7 @@
}
],
"source": [
- "Account & 'DATEDIFF(now(), dob) < 300'"
+ "Account & \"DATEDIFF(now(), dob) < 300\""
]
},
{
@@ -1413,7 +1414,7 @@
}
],
"source": [
- "Account - 'DATEDIFF(now(), dob) < 300'"
+ "Account - \"DATEDIFF(now(), dob) < 300\""
]
},
{
@@ -2138,7 +2139,7 @@
}
],
"source": [
- "Account.proj('last_name')"
+ "Account.proj(\"last_name\")"
]
},
{
@@ -2284,7 +2285,7 @@
}
],
"source": [
- "Account.proj(..., '-dob')"
+ "Account.proj(..., \"-dob\")"
]
},
{
@@ -2421,7 +2422,7 @@
"metadata": {},
"outputs": [],
"source": [
- "accounts = Account.proj('last_name', age=\"floor(datediff(now(), dob)/365.25)\")"
+ "accounts = Account.proj(\"last_name\", age=\"floor(datediff(now(), dob)/365.25)\")"
]
},
{
@@ -2552,7 +2553,7 @@
}
],
"source": [
- "accounts & 'age > 35'"
+ "accounts & \"age > 35\""
]
},
{
@@ -2683,7 +2684,7 @@
}
],
"source": [
- "accounts & 'age between 35 and 45'"
+ "accounts & \"age between 35 and 45\""
]
},
{
@@ -2844,7 +2845,7 @@
}
],
"source": [
- "Account().proj(..., id='phone', name='(first_name)')"
+ "Account().proj(..., id=\"phone\", name=\"(first_name)\")"
]
},
{
@@ -3476,7 +3477,7 @@
],
"source": [
"# All the people that purchased AddOn #2.\n",
- "Account & (Purchase & 'addon_id=2')"
+ "Account & (Purchase & \"addon_id=2\")"
]
},
{
@@ -3623,7 +3624,7 @@
],
"source": [
"# Give me all accounts who have purchased both Addon 2 and 3\n",
- "Account & (Purchase & 'addon_id=2') & (Purchase & 'addon_id=3')"
+ "Account & (Purchase & \"addon_id=2\") & (Purchase & \"addon_id=3\")"
]
},
{
@@ -3770,9 +3771,9 @@
],
"source": [
"# Give me all accounts who have purchased Addon 2 or 3\n",
- "Account & (Purchase & 'addon_id=2 OR addon_id=3')\n",
- "Account & (Purchase & 'addon_id in (2, 3)')\n",
- "Account & (Purchase & ['addon_id=3', 'addon_id=2'])"
+ "Account & (Purchase & \"addon_id=2 OR addon_id=3\")\n",
+ "Account & (Purchase & \"addon_id in (2, 3)\")\n",
+ "Account & (Purchase & [\"addon_id=3\", \"addon_id=2\"])"
]
},
{
diff --git a/db-course/005-QueriesSQL.ipynb b/db-course/005-QueriesSQL.ipynb
index 3ddc0fc..283c0a5 100644
--- a/db-course/005-QueriesSQL.ipynb
+++ b/db-course/005-QueriesSQL.ipynb
@@ -23,6 +23,7 @@
"outputs": [],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"\n",
"%load_ext sql\n",
diff --git a/db-course/006-Joins-HW.ipynb b/db-course/006-Joins-HW.ipynb
index 7067c05..b8bb2d2 100644
--- a/db-course/006-Joins-HW.ipynb
+++ b/db-course/006-Joins-HW.ipynb
@@ -7,6 +7,7 @@
"outputs": [],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"%load_ext sql\n",
"%config SqlMagic.autocommit=True\n",
@@ -30,19 +31,19 @@
"source": [
"import datajoint as dj\n",
"\n",
- "sales = dj.Schema('classicsales')\n",
+ "sales = dj.Schema(\"classicsales\")\n",
"sales.spawn_missing_classes()\n",
"\n",
- "nations = dj.Schema('nation')\n",
+ "nations = dj.Schema(\"nation\")\n",
"nations.spawn_missing_classes()\n",
"\n",
- "hotel = dj.Schema('hotel')\n",
+ "hotel = dj.Schema(\"hotel\")\n",
"hotel.spawn_missing_classes()\n",
"\n",
- "university = dj.Schema('university')\n",
+ "university = dj.Schema(\"university\")\n",
"university.spawn_missing_classes()\n",
"\n",
- "app = dj.Schema('app')\n",
+ "app = dj.Schema(\"app\")\n",
"app.spawn_missing_classes()"
]
},
@@ -1251,7 +1252,13 @@
}
],
"source": [
- "dj.Diagram(sales) + dj.Diagram(university) + dj.Diagram(nations) + dj.Diagram(hotel) + dj.Diagram(app)"
+ "(\n",
+ " dj.Diagram(sales)\n",
+ " + dj.Diagram(university)\n",
+ " + dj.Diagram(nations)\n",
+ " + dj.Diagram(hotel)\n",
+ " + dj.Diagram(app)\n",
+ ")"
]
},
{
@@ -1260,133 +1267,133 @@
"source": [
"# Homework 6: Joins and Subqueries\n",
"\n",
- "The following queries may require joins. Some queries may be performed using subquiries without joins."
+ "The following queries may require joins. Some queries may be performed using subquiries without joins.\n"
]
},
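As a reminder of the two styles, a sketch using the `app` schema tables spawned in the setup cell above (not a solution to any specific problem below):

```python
# both queries find accounts that purchased AddOn 2
by_subquery = Account & (Purchase & "addon_id=2")  # restriction by subquery, no join
by_join = (Account * Purchase & "addon_id=2").proj()  # join, then project back to Account's key
```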
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 1 (sales)**: Show customer names along with the last names of their sales rep (omitting ones that don't have a sales rep)."
+ "**Problem 1 (sales)**: Show customer names along with the last names of their sales rep (omitting ones that don't have a sales rep).\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 2 (sales)**: Show all employees, including the last name of their boss (omitting the top boss who reports to no one)."
+ "**Problem 2 (sales)**: Show all employees, including the last name of their boss (omitting the top boss who reports to no one).\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 3 (sales):** Show all employees whose boss' office is outside the USA."
+ "**Problem 3 (sales):** Show all employees whose boss' office is outside the USA.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 4 (sales):** Show all employees whose boss is in a different office."
+ "**Problem 4 (sales):** Show all employees whose boss is in a different office.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 5 (sales):** Show all customers who have bought model trains."
+ "**Problem 5 (sales):** Show all customers who have bought model trains.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 6 (sales):** Show all employees who have not sold model trains."
+ "**Problem 6 (sales):** Show all employees who have not sold model trains.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 7 (nations)**: Show the names of all countries in North America along with their populations in 1996"
+ "**Problem 7 (nations)**: Show the names of all countries in North America along with their populations in 1996\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 8 (nations)**: Show the names of countries on the continent of Oceania along with their populations in 1996"
+ "**Problem 8 (nations)**: Show the names of countries on the continent of Oceania along with their populations in 1996\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 9 (nations)**: Show the top 10 contries by their absolute population increase between 1990 and 2010."
+ "**Problem 9 (nations)**: Show the top 10 countries by their absolute population increase between 1990 and 2010.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 10 (nations)**: Show the top 10 contries by their percent increase in per capita gdp from 1990 to 2010."
+ "**Problem 10 (nations)**: Show the top 10 countries by their percent increase in per capita gdp from 1990 to 2010.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 11 (nations)**: List the top 5 most populous countries where Spanish is the official languag in 2010."
+ "**Problem 11 (nations)**: List the top 5 most populous countries where Spanish is the official language in 2010.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 12 (nations)**: List the top 10 wealthiest (per capita) non-English speaking countries in 2015."
+ "**Problem 12 (nations)**: List the top 10 wealthiest (per capita) non-English speaking countries in 2015.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 13 (hotel)**: List all the reservations for 2023-11-01, including the room price, and the guest's last name. (Feel free to pick a different date.)"
+ "**Problem 13 (hotel)**: List all the reservations for 2023-11-01, including the room price, and the guest's last name. (Feel free to pick a different date.)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 14 (hotel)**: Show all guests who have checked in and not checked out."
+ "**Problem 14 (hotel)**: Show all guests who have checked in and not checked out.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 15 (university):** Pick one student and show his or her course enrollments in the current term."
+ "**Problem 15 (university):** Pick one student and show his or her course enrollments in the current term.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 16 (university):** Show all students who have received As in math in the current term."
+ "**Problem 16 (university):** Show all students who have received As in math in the current term.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 17 (app):** List names of the buyers from the latest 10 sales of the Marathon app."
+ "**Problem 17 (app):** List names of the buyers from the latest 10 sales of the Marathon app.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 18 (app):** List the latest purchase made on the buyers' birthday, including the name of the addon that was purchased."
+ "**Problem 18 (app):** List the latest purchase made on the buyers' birthday, including the name of the addon that was purchased.\n"
]
},
{
@@ -1540,9 +1547,13 @@
}
],
"source": [
- "((Purchase * AddOn * Account & 'month(dob) = month(purchase_date)' \n",
- "& 'day(dob) = day(purchase_date)').proj('purchase_date', 'addon_name')).fetch(\n",
- " order_by='purchase_date desc', limit=1, format='frame') "
+ "(\n",
+ " (\n",
+ " Purchase * AddOn * Account\n",
+ " & \"month(dob) = month(purchase_date)\"\n",
+ " & \"day(dob) = day(purchase_date)\"\n",
+ " ).proj(\"purchase_date\", \"addon_name\")\n",
+ ").fetch(order_by=\"purchase_date desc\", limit=1, format=\"frame\")"
]
},
{
@@ -1676,10 +1687,15 @@
}
],
"source": [
- "%%sql\n",
+ "% % sql\n",
"-- show the latest purchase made on a person's birthday, show the addon name\n",
- "SELECT phone, addon_name, purchase_date FROM account NATURAL JOIN purchase NATURAL JOIN `#add_on` \n",
- "WHERE month(purchase_date) = month(dob) AND day(purchase_date) = day(dob)\n",
+ "SELECT phone, addon_name, purchase_date\n",
+ "FROM account\n",
+ " NATURAL JOIN purchase\n",
+ " NATURAL JOIN `#add_on`\n",
+ "WHERE\n",
+ " month(purchase_date) = month(dob)\n",
+ " AND day(purchase_date) = day(dob)\n",
"ORDER BY purchase_date DESC\n",
"LIMIT 1"
]
@@ -1731,15 +1747,26 @@
}
],
"source": [
- "%%sql\n",
+ "% % sql\n",
"-- show the latest purchase made on a person's birthday, show the addon name\n",
- "SELECT phone, addon_name, purchase_date FROM account NATURAL JOIN purchase NATURAL JOIN `#add_on` \n",
- "WHERE month(purchase_date) = month(dob) AND day(purchase_date) = day(dob)\n",
- "AND purchase_date = (\n",
- " SELECT purchase_date FROM account NATURAL JOIN purchase NATURAL JOIN `#add_on` \n",
- " WHERE month(purchase_date) = month(dob) AND day(purchase_date) = day(dob)\n",
- " ORDER BY purchase_date DESC\n",
- " LIMIT 1)"
+ "SELECT phone, addon_name, purchase_date\n",
+ "FROM account\n",
+ " NATURAL JOIN purchase\n",
+ " NATURAL JOIN `#add_on`\n",
+ "WHERE\n",
+ " month(purchase_date) = month(dob)\n",
+ " AND day(purchase_date) = day(dob)\n",
+ " AND purchase_date = (\n",
+ " SELECT purchase_date\n",
+ " FROM account\n",
+ " NATURAL JOIN purchase\n",
+ " NATURAL JOIN `#add_on`\n",
+ " WHERE\n",
+ " month(purchase_date) = month(dob)\n",
+ " AND day(purchase_date) = day(dob)\n",
+ " ORDER BY purchase_date DESC\n",
+ " LIMIT 1\n",
+ " )"
]
},
{
@@ -2265,10 +2292,16 @@
}
],
"source": [
- "((Continents.proj(..., continent_name=\"name\") * \n",
- "Regions.proj(..., region_name=\"name\") * \n",
- "Countries.proj(..., country_name=\"name\") * CountryStats())\n",
- " & 'continent_name=\"North America\"' & 'year=1996').proj('country_name', 'population')\n"
+ "(\n",
+ " (\n",
+ " Continents.proj(..., continent_name=\"name\")\n",
+ " * Regions.proj(..., region_name=\"name\")\n",
+ " * Countries.proj(..., country_name=\"name\")\n",
+ " * CountryStats()\n",
+ " )\n",
+ " & 'continent_name=\"North America\"'\n",
+ " & \"year=1996\"\n",
+ ").proj(\"country_name\", \"population\")"
]
},
{
@@ -2277,7 +2310,7 @@
"metadata": {},
"outputs": [],
"source": [
- "dj.config['display.limit'] = 30"
+ "dj.config[\"display.limit\"] = 30"
]
},
{
diff --git a/db-course/006-Joins.ipynb b/db-course/006-Joins.ipynb
index c12f8b3..ac67344 100644
--- a/db-course/006-Joins.ipynb
+++ b/db-course/006-Joins.ipynb
@@ -108,7 +108,8 @@
],
"source": [
"import datajoint as dj\n",
- "schema = dj.Schema('app')\n",
+ "\n",
+ "schema = dj.Schema(\"app\")\n",
"schema.spawn_missing_classes()\n",
"dj.Diagram(schema)"
]
@@ -1129,7 +1130,8 @@
" --- \n",
" full_name : varchar(60)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Dependent(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1267,12 +1269,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Person.insert((\n",
- " (1, \"Bob\"),\n",
- " (2, \"Anne\"),\n",
- " (3, \"Dave\"),\n",
- " (4, \"Carol\")\n",
- "))"
+ "Person.insert(((1, \"Bob\"), (2, \"Anne\"), (3, \"Dave\"), (4, \"Carol\")))"
]
},
{
@@ -1408,7 +1405,9 @@
}
],
"source": [
- "Person * Dependent * Person.proj(provider_id=\"person_id\", provider_full_name=\"full_name\")"
+ "Person * Dependent * Person.proj(\n",
+ " provider_id=\"person_id\", provider_full_name=\"full_name\"\n",
+ ")"
]
},
{
diff --git a/db-course/006-JoinsSQL.ipynb b/db-course/006-JoinsSQL.ipynb
index cd0cfa2..932a148 100644
--- a/db-course/006-JoinsSQL.ipynb
+++ b/db-course/006-JoinsSQL.ipynb
@@ -16,6 +16,7 @@
],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"%load_ext sql\n",
"%config SqlMagic.autocommit=True\n",
diff --git a/db-course/007-Aggregation-HW.ipynb b/db-course/007-Aggregation-HW.ipynb
index cf5ff56..8c9fc0c 100644
--- a/db-course/007-Aggregation-HW.ipynb
+++ b/db-course/007-Aggregation-HW.ipynb
@@ -7,6 +7,7 @@
"outputs": [],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"%load_ext sql\n",
"%config SqlMagic.autocommit=True\n",
@@ -21,19 +22,19 @@
"source": [
"import datajoint as dj\n",
"\n",
- "sales = dj.Schema('classicsales')\n",
+ "sales = dj.Schema(\"classicsales\")\n",
"sales.spawn_missing_classes()\n",
"\n",
- "nations = dj.Schema('nation')\n",
+ "nations = dj.Schema(\"nation\")\n",
"nations.spawn_missing_classes()\n",
"\n",
- "hotel = dj.Schema('hotel')\n",
+ "hotel = dj.Schema(\"hotel\")\n",
"hotel.spawn_missing_classes()\n",
"\n",
- "university = dj.Schema('university')\n",
+ "university = dj.Schema(\"university\")\n",
"university.spawn_missing_classes()\n",
"\n",
- "app = dj.Schema('app')\n",
+ "app = dj.Schema(\"app\")\n",
"app.spawn_missing_classes()"
]
},
@@ -442,7 +443,13 @@
}
],
"source": [
- "dj.Diagram(sales) + dj.Diagram(hotel) + dj.Diagram(university) + dj.Diagram(app) + dj.Diagram(nations)"
+ "(\n",
+ " dj.Diagram(sales)\n",
+ " + dj.Diagram(hotel)\n",
+ " + dj.Diagram(university)\n",
+ " + dj.Diagram(app)\n",
+ " + dj.Diagram(nations)\n",
+ ")"
]
},
{
@@ -456,7 +463,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 1 (sales)**: Show all the offices along with the number of employees in each."
+ "**Problem 1 (sales)**: Show all the offices along with the number of employees in each.\n"
]
},
{
@@ -641,14 +648,14 @@
}
],
"source": [
- "Office.aggr(Employee, ..., employee_count='count(*)')"
+ "Office.aggr(Employee, ..., employee_count=\"count(*)\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 2 (sales)**: Show all employees, including the number of direct reports they have."
+ "**Problem 2 (sales)**: Show all employees, including the number of direct reports they have.\n"
]
},
{
@@ -928,14 +935,16 @@
}
],
"source": [
- "Employee.proj(..., reports_to=\"employee_number\").aggr(Report, n=\"count(employee_number)\", keep_all_rows=True)"
+ "Employee.proj(..., reports_to=\"employee_number\").aggr(\n",
+ " Report, n=\"count(employee_number)\", keep_all_rows=True\n",
+ ")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 3 (sales):** Show the top biggests orders in the current month along with the total amount on the order."
+ "**Problem 3 (sales):** Show the top biggests orders in the current month along with the total amount on the order.\n"
]
},
{
@@ -1050,7 +1059,9 @@
}
],
"source": [
- "(Order & 'order_date between \"2005-01-01\" and \"2005-01-31\"').aggr(Order.Item, total_amount=\"sum(quantity * price)\")"
+ "(Order & 'order_date between \"2005-01-01\" and \"2005-01-31\"').aggr(\n",
+ " Order.Item, total_amount=\"sum(quantity * price)\"\n",
+ ")"
]
},
{
@@ -1361,7 +1372,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 4 (sales):** Show the top 5 customers by the amount of money that they have spent this month, including the amount."
+ "**Problem 4 (sales):** Show the top 5 customers by the amount of money that they have spent this month, including the amount.\n"
]
},
{
@@ -1407,7 +1418,7 @@
" \n",
"
\n",
" 282 | \n",
- " Souveniers And Things Co. | \n",
+ " Souvenirs And Things Co. | \n",
" 42251.51 | \n",
"
\n",
" \n",
@@ -1473,7 +1484,7 @@
"
\n",
" \n",
" 323 | \n",
- " Down Under Souveniers, Inc | \n",
+ " Down Under Souvenirs, Inc | \n",
" 75020.13 | \n",
"
\n",
" \n",
@@ -1500,7 +1511,7 @@
""
],
"text/plain": [
- "[(323, 'Down Under Souveniers, Inc', Decimal('75020.13')),\n",
+ "[(323, 'Down Under Souvenirs, Inc', Decimal('75020.13')),\n",
" (141, 'Euro+ Shopping Channel', Decimal('46895.48')),\n",
" (209, 'Mini Caravy', Decimal('35157.75')),\n",
" (496, \"Kelly's Gift Shop\", Decimal('30253.75')),\n",
@@ -1542,7 +1553,10 @@
}
],
"source": [
- "q = Customer.aggr(Order * Order.Item & 'order_date between \"2005-05-01\" and \"2005-05-31\"', total_spent=\"quantity*price\")\n",
+ "q = Customer.aggr(\n",
+ " Order * Order.Item & 'order_date between \"2005-05-01\" and \"2005-05-31\"',\n",
+ " total_spent=\"quantity*price\",\n",
+ ")\n",
"\n",
"q.make_sql()"
]
@@ -1551,14 +1565,14 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 5 (app):** For each addon, show how many people have bought them."
+ "**Problem 5 (app):** For each addon, show how many people have bought them.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 6 (sales):** Show the top 5 employees by the sales they have made so far this year."
+ "**Problem 6 (sales):** Show the top 5 employees by the sales they have made so far this year.\n"
]
},
{
@@ -1681,7 +1695,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 7 (nations)**: Show the top 5 languages by the number of countries in which they are the official language. Include the number of countries."
+ "**Problem 7 (nations)**: Show the top 5 languages by the number of countries in which they are the official language. Include the number of countries.\n"
]
},
{
@@ -1886,7 +1900,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 8 (nations)**: Show the world population and and gdp for 2018."
+ "**Problem 8 (nations)**: Show the world population and and gdp for 2018.\n"
]
},
{
@@ -2066,14 +2080,14 @@
}
],
"source": [
- "dj.U('year').aggr(CountryStats, pop='sum(population)', gdp='sum(gdp)')"
+ "dj.U(\"year\").aggr(CountryStats, pop=\"sum(population)\", gdp=\"sum(gdp)\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 9 (nations)**: Show the world population and GDP for each year."
+ "**Problem 9 (nations)**: Show the world population and GDP for each year.\n"
]
},
{
@@ -2477,35 +2491,35 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 10 (nations)**: Show all continents, along with their populations and GDP in 2018."
+ "**Problem 10 (nations)**: Show all continents, along with their populations and GDP in 2018.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 11 (nations)**: Show all the countries in Africa with a population greater than 100,000,000 in 2018."
+ "**Problem 11 (nations)**: Show all the countries in Africa with a population greater than 100,000,000 in 2018.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 12 (university)**: Show the total number of students who have taken a math class."
+ "**Problem 12 (university)**: Show the total number of students who have taken a math class.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 13 (university)**: Show the top course by enrollment in the current term."
+ "**Problem 13 (university)**: Show the top course by enrollment in the current term.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 14 (hotel)**: Show the top five guests by the number of nights that they have stayed a the hotel."
+ "**Problem 14 (hotel)**: Show the top five guests by the number of nights that they have stayed a the hotel.\n"
]
},
{
@@ -2697,7 +2711,9 @@
}
],
"source": [
- "Guest.aggr(Reservation * CheckIn, ..., n='count(*)').fetch(order_by='n DESC', limit=5, format='frame')"
+ "Guest.aggr(Reservation * CheckIn, ..., n=\"count(*)\").fetch(\n",
+ " order_by=\"n DESC\", limit=5, format=\"frame\"\n",
+ ")"
]
},
{
@@ -2784,21 +2800,21 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 15 (nations):** Show all the regions and the average GDP per capita in each for 2018."
+ "**Problem 15 (nations):** Show all the regions and the average GDP per capita in each for 2018.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 16 (Sales):** Show the top five products by total sales (in dollars)"
+ "**Problem 16 (Sales):** Show the top five products by total sales (in dollars)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 17 (app):** Show the total sales by day over the last month."
+ "**Problem 17 (app):** Show the total sales by day over the last month.\n"
]
},
{
@@ -2894,7 +2910,7 @@
"\n",
"SELECT purchase_date, SUM(price) AS total_sales\n",
"FROM `purchase`\n",
- "NATRUAL JOIN `#add_on` \n",
+ "NATURAL JOIN `#add_on` \n",
"WHERE purchase_date >= CURDATE() - INTERVAL 1 MONTH\n",
"GROUP BY purchase_date\n",
"ORDER BY total_sales DESC\n",
@@ -2905,21 +2921,21 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 18 (university):** Show all the departments and the number of students electing them for their majors."
+ "**Problem 18 (university):** Show all the departments and the number of students electing them for their majors.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 19 (university):** Show all departments and the number of courses they offer in the current semester."
+ "**Problem 19 (university):** Show all departments and the number of courses they offer in the current semester.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 20 (university)** Show what fraction of student who declared \"MATH\" as their major each year."
+ "**Problem 20 (university)** Show what fraction of student who declared \"MATH\" as their major each year.\n"
]
},
{
@@ -2933,7 +2949,7 @@
"SELECT\n",
" YEAR(student_major.declare_date) AS declare_year,\n",
" round(avg(ifnull(dept='MATH', 0) )*100, 2) as percent_math_majors\n",
- "FROM student NATRUAL LEFT JOIN student_major\n",
+ "FROM student NATURAL LEFT JOIN student_major\n",
"GROUP BY declare_year\n",
"ORDER BY declare_year DESC\n",
"LIMIT 15;"
@@ -2943,21 +2959,21 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 21 (university)** Show all courses offered in the current term with at least five students enrolled."
+ "**Problem 21 (university)** Show all courses offered in the current term with at least five students enrolled.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "# Solutions"
+ "# Solutions\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
- "**Problem 15** - Show the world regions and the GDP per capita in each for 2018"
+ "**Problem 15** - Show the world regions and the GDP per capita in each for 2018\n"
]
},
{
@@ -2966,9 +2982,9 @@
"metadata": {},
"outputs": [],
"source": [
- "stats2018 = Countries.proj(..., country_name=\"name\") * CountryStats() & 'year=2018'\n",
+ "stats2018 = Countries.proj(..., country_name=\"name\") * CountryStats() & \"year=2018\"\n",
"\n",
- "Regions.aggr(stats2018 , 'name', gdp_per_capita = 'sum(gdp) / sum(population)') "
+ "Regions.aggr(stats2018, \"name\", gdp_per_capita=\"sum(gdp) / sum(population)\")"
]
},
{
@@ -2981,11 +2997,14 @@
},
"outputs": [],
"source": [
- "%%sql\n",
+ "% % sql\n",
"-- Show the world's GDP per capita for 2018\n",
"use nation;\n",
"\n",
- "SELECT sum(gdp) / sum(population) as gdp_per_capital FROM country_stats WHERE year=2018"
+ "SELECT sum(gdp) / sum(population) as gdp_per_capital\n",
+ "FROM country_stats\n",
+ "WHERE\n",
+ " year = 2018"
]
},
{
@@ -2998,12 +3017,17 @@
},
"outputs": [],
"source": [
- "%%sql\n",
- "use nation; \n",
+ "% % sql use nation;\n",
"-- Show the world's GDP per capita for 2018 by region\n",
- "SELECT regions.name, sum(gdp) / sum(population) as gdp_per_capita \n",
- "FROM regions JOIN countries using (region_id) NATURAL JOIN country_stats \n",
- "WHERE year=2018 GROUP BY region_id\n",
+ "SELECT regions.name, sum(gdp) / sum(population) as gdp_per_capita\n",
+ "FROM\n",
+ " regions\n",
+ " JOIN countries using (region_id)\n",
+ " NATURAL JOIN country_stats\n",
+ "WHERE\n",
+ " year = 2018\n",
+ "GROUP BY\n",
+ " region_id\n",
"ORDER BY gdp_per_capita DESC"
]
},
@@ -3017,16 +3041,22 @@
},
"outputs": [],
"source": [
- "%%sql\n",
+ "% % sql\n",
"-- show all the regions with GDP per capita over 25,000 in 2018\n",
"use nation;\n",
"\n",
- "SELECT regions.name, sum(gdp) / sum(population) as gdp_per_capita \n",
- "FROM regions JOIN countries using (region_id) NATURAL JOIN country_stats \n",
- "WHERE year=2018 \n",
- "GROUP BY region_id\n",
- "HAVING gdp_per_capita > 25000\n",
- "ORDER BY gdp_per_capita DESC\n"
+ "SELECT regions.name, sum(gdp) / sum(population) as gdp_per_capita\n",
+ "FROM\n",
+ " regions\n",
+ " JOIN countries using (region_id)\n",
+ " NATURAL JOIN country_stats\n",
+ "WHERE\n",
+ " year = 2018\n",
+ "GROUP BY\n",
+ " region_id\n",
+ "HAVING\n",
+ " gdp_per_capita > 25000\n",
+ "ORDER BY gdp_per_capita DESC"
]
},
{
@@ -3039,15 +3069,23 @@
},
"outputs": [],
"source": [
- "%%sql\n",
+ "% % sql\n",
"\n",
- "SELECT * FROM (\n",
- " SELECT regions.name, sum(gdp) / sum(population) as gdp_per_capita \n",
- " FROM regions JOIN countries using (region_id) NATURAL JOIN country_stats \n",
- " WHERE year=2018 \n",
- " GROUP BY region_id\n",
- " ORDER BY gdp_per_capita DESC) as q\n",
- "WHERE gdp_per_capita > 25000"
+ "SELECT *\n",
+ "FROM (\n",
+ " SELECT regions.name, sum(gdp) / sum(population) as gdp_per_capita\n",
+ " FROM\n",
+ " regions\n",
+ " JOIN countries using (region_id)\n",
+ " NATURAL JOIN country_stats\n",
+ " WHERE\n",
+ " year = 2018\n",
+ " GROUP BY\n",
+ " region_id\n",
+ " ORDER BY gdp_per_capita DESC\n",
+ " ) as q\n",
+ "WHERE\n",
+ " gdp_per_capita > 25000"
]
},
{
diff --git a/db-course/007-Aggregation.ipynb b/db-course/007-Aggregation.ipynb
index 76fd0b4..369eaf6 100644
--- a/db-course/007-Aggregation.ipynb
+++ b/db-course/007-Aggregation.ipynb
@@ -15,6 +15,7 @@
"outputs": [],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"%load_ext sql\n",
"%config SqlMagic.autocommit=True\n",
@@ -38,19 +39,19 @@
"source": [
"import datajoint as dj\n",
"\n",
- "sales = dj.Schema('classicsales')\n",
+ "sales = dj.Schema(\"classicsales\")\n",
"sales.spawn_missing_classes()\n",
"\n",
- "nations = dj.Schema('nation')\n",
+ "nations = dj.Schema(\"nation\")\n",
"nations.spawn_missing_classes()\n",
"\n",
- "hotel = dj.Schema('hotel')\n",
+ "hotel = dj.Schema(\"hotel\")\n",
"hotel.spawn_missing_classes()\n",
"\n",
- "university = dj.Schema('university')\n",
+ "university = dj.Schema(\"university\")\n",
"university.spawn_missing_classes()\n",
"\n",
- "app = dj.Schema('app')\n",
+ "app = dj.Schema(\"app\")\n",
"app.spawn_missing_classes()"
]
},
@@ -80,7 +81,8 @@
"outputs": [],
"source": [
"import datajoint as dj\n",
- "schema = dj.Schema('app')\n",
+ "\n",
+ "schema = dj.Schema(\"app\")\n",
"schema.spawn_missing_classes()\n",
"dj.Diagram(schema)"
]
@@ -92,6 +94,7 @@
"outputs": [],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"%load_ext sql\n",
"%config SqlMagic.autocommit=True\n",
@@ -227,7 +230,7 @@
"outputs": [],
"source": [
"# show average male age\n",
- "dj.U().aggr(Account & 'sex=\"M\"' , avg_age=\"floor(avg(DATEDIFF(now(), dob)) / 365.25)\")"
+ "dj.U().aggr(Account & 'sex=\"M\"', avg_age=\"floor(avg(DATEDIFF(now(), dob)) / 365.25)\")"
]
},
{
@@ -408,7 +411,7 @@
"2. With `GROUP BY`, the grouping attributes become the new primary key of the result. \n",
"3. Do not mix aggregated and non-aggregated values in the result with or without a `GROUP BY`.\n",
"4. `HAVING` plays the same role as the `WHERE` clause in a nesting outer query so it can use the output of the aggregation functions.\n",
- "5. `LEFT JOIN` is often follwed with a `GROUP BY` by the primary key attributes of the left table. In this scenario the entities in the right table are aggregated for each matching row in the left table.\n"
+ "5. `LEFT JOIN` is often followed with a `GROUP BY` by the primary key attributes of the left table. In this scenario the entities in the right table are aggregated for each matching row in the left table.\n"
]
},
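The rules above map directly onto DataJoint's `aggr` operator. A minimal sketch, assuming the `nation` schema classes spawned earlier in this notebook (the population threshold is made up for illustration):

```python
import datajoint as dj

nations = dj.Schema("nation")
nations.spawn_missing_classes()  # defines CountryStats, among others

# rule 2: the grouping attribute "year" becomes the primary key of the result
world = dj.U("year").aggr(CountryStats, pop="sum(population)", gdp="sum(gdp)")

# rule 4: restricting the aggregated result plays the role of SQL's HAVING
world & "pop > 5000000000"
```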
{
@@ -706,7 +709,9 @@
],
"source": [
"# Show all the orders made in March 2003 and the total number of items on each\n",
- "(Order & 'order_date between \"2003-03-01\" and \"2003-03-31\"').aggr(Order.Item(), n='count(*)', keep_all_rows=True)"
+ "(Order & 'order_date between \"2003-03-01\" and \"2003-03-31\"').aggr(\n",
+ " Order.Item(), n=\"count(*)\", keep_all_rows=True\n",
+ ")"
]
},
{
@@ -811,7 +816,7 @@
"source": [
"# SHOW ALL the employees, the number of their direct reports\n",
"\n",
- "Employee.proj(reports_to='employee_number').aggr(Report, n='count(employee_number)')"
+ "Employee.proj(reports_to=\"employee_number\").aggr(Report, n=\"count(employee_number)\")"
]
},
{
diff --git a/db-course/007-Compute-JuliaSets.ipynb b/db-course/007-Compute-JuliaSets.ipynb
index e96cc5d..9f627d6 100644
--- a/db-course/007-Compute-JuliaSets.ipynb
+++ b/db-course/007-Compute-JuliaSets.ipynb
@@ -32,8 +32,8 @@
"source": [
"def julia(c, size=256, center=(0.0, 0.0), zoom=1.0, iters=256):\n",
" x, y = np.meshgrid(\n",
- " np.linspace(-1, 1, size)/zoom + center[0], \n",
- " np.linspace(-1, 1, size)/zoom + center[1], \n",
+ " np.linspace(-1, 1, size) / zoom + center[0],\n",
+ " np.linspace(-1, 1, size) / zoom + center[1],\n",
" )\n",
" z = x + 1j * y\n",
" im = np.zeros(z.shape)\n",
@@ -63,7 +63,7 @@
}
],
"source": [
- "plt.imshow(julia(-0.4+0.6j), cmap='magma')\n",
+ "plt.imshow(julia(-0.4 + 0.6j), cmap=\"magma\")\n",
"plt.axis(False);"
]
},
@@ -85,7 +85,7 @@
}
],
"source": [
- "plt.imshow(julia(-0.4+0.6j, center=(0.34, -0.30), zoom=10000.0), cmap='magma')\n",
+ "plt.imshow(julia(-0.4 + 0.6j, center=(0.34, -0.30), zoom=10000.0), cmap=\"magma\")\n",
"plt.axis(False);"
]
},
@@ -97,9 +97,9 @@
"outputs": [],
"source": [
"c = (\n",
- " -0.4 + 0.6j, \n",
- " -0.74543 + 0.11301j, \n",
- " -0.75 + 0.11j, \n",
+ " -0.4 + 0.6j,\n",
+ " -0.74543 + 0.11301j,\n",
+ " -0.75 + 0.11j,\n",
" -0.1 + 0.651j,\n",
" -0.835 - 0.2321j,\n",
" -0.70176 - 0.3842j,\n",
@@ -128,9 +128,9 @@
"\n",
"fig, ax = plt.subplots(3, 2, figsize=(7.5, 12))\n",
"for c_, a in zip(c, ax.flatten()):\n",
- " img = julia(c_, zoom=0.5) \n",
+ " img = julia(c_, zoom=0.5)\n",
" img += np.random.randn(*img.shape) * noise_level\n",
- " a.imshow(img, cmap='magma')\n",
+ " a.imshow(img, cmap=\"magma\")\n",
" a.axis(False)"
]
},
@@ -172,7 +172,7 @@
"outputs": [],
"source": [
"noise_level = 50.0\n",
- "img = julia(-0.4+0.6j, size=200)\n",
+ "img = julia(-0.4 + 0.6j, size=200)\n",
"noise_img = img + np.random.randn(*img.shape) * noise_level\n",
"median_img = filters.median(noise_img, disk(3))\n",
"tv_img = restoration.denoise_tv_chambolle(noise_img, weight=20.0)\n",
@@ -203,13 +203,16 @@
"fig, ax = plt.subplots(3, 2, figsize=(6, 9))\n",
"for a, (im, title) in zip(\n",
" ax.flatten(),\n",
- " ((img, 'original'), \n",
- " (noise_img, 'original+noise'),\n",
- " (gaussian_img, 'gaussian'),\n",
- " (median_img, 'median'), \n",
- " (wavelet_img, 'wavelet'),\n",
- " (tv_img, 'tv'), )):\n",
- " a.imshow(im, cmap='magma', vmin=0, vmax=255)\n",
+ " (\n",
+ " (img, \"original\"),\n",
+ " (noise_img, \"original+noise\"),\n",
+ " (gaussian_img, \"gaussian\"),\n",
+ " (median_img, \"median\"),\n",
+ " (wavelet_img, \"wavelet\"),\n",
+ " (tv_img, \"tv\"),\n",
+ " ),\n",
+ "):\n",
+ " a.imshow(im, cmap=\"magma\", vmin=0, vmax=255)\n",
" a.axis(False)\n",
" a.set_title(title)"
]
@@ -242,7 +245,7 @@
"source": [
"import datajoint as dj\n",
"\n",
- "schema = dj.Schema('julia')"
+ "schema = dj.Schema(\"julia\")"
]
},
{
@@ -252,7 +255,7 @@
"metadata": {},
"outputs": [],
"source": [
- "@schema \n",
+ "@schema\n",
"class JuliaSpec(dj.Lookup):\n",
" definition = \"\"\"\n",
" julia_spec : smallint \n",
@@ -264,17 +267,16 @@
" center_imag=0.0 : float\n",
" zoom=1.0 : float\n",
" noise_level=50 : float\n",
- " \"\"\" \n",
+ " \"\"\"\n",
"\n",
- " contents = (\n",
+ " contents = (\n",
" dict(julia_spec=0, creal=-0.4, cimag=0.6, noise_level=50),\n",
" dict(julia_spec=1, creal=-0.7453, cimag=0.11301, noise_level=50),\n",
" dict(julia_spec=2, creal=-0.75, cimag=0.11, noise_level=50),\n",
" dict(julia_spec=3, creal=-0.1, cimag=0.651, noise_level=50),\n",
" dict(julia_spec=4, creal=-0.835, cimag=-0.2321, noise_level=50),\n",
" dict(julia_spec=5, creal=-0.70176, cimag=-0.3842, noise_level=50),\n",
- " )\n",
- "\n"
+ " )"
]
},
{
@@ -285,12 +287,16 @@
"outputs": [],
"source": [
"JuliaSpec.insert1(\n",
- " dict(julia_spec=10, \n",
- " creal=-0.4, cimag=0.6, \n",
- " center_real=0.34, center_imag=-0.30, \n",
- " zoom=10000.0, \n",
- " noise_level=50.0)\n",
- ") "
+ " dict(\n",
+ " julia_spec=10,\n",
+ " creal=-0.4,\n",
+ " cimag=0.6,\n",
+ " center_real=0.34,\n",
+ " center_imag=-0.30,\n",
+ " zoom=10000.0,\n",
+ " noise_level=50.0,\n",
+ " )\n",
+ ")"
]
},
{
@@ -310,12 +316,13 @@
"\n",
" def make(self, key):\n",
" spec = (JuliaSpec & key).fetch1()\n",
- " img = julia(spec['creal'] + 1j*spec['cimag'], \n",
- " size=spec['size'],\n",
- " center=(spec['center_real'], spec['center_imag']),\n",
- " zoom=spec['zoom'],\n",
- " )\n",
- " img += np.random.randn(*img.shape) * spec['noise_level']\n",
+ " img = julia(\n",
+ " spec[\"creal\"] + 1j * spec[\"cimag\"],\n",
+ " size=spec[\"size\"],\n",
+ " center=(spec[\"center_real\"], spec[\"center_imag\"]),\n",
+ " zoom=spec[\"zoom\"],\n",
+ " )\n",
+ " img += np.random.randn(*img.shape) * spec[\"noise_level\"]\n",
" self.insert1(dict(key, image=img.astype(np.float32)))"
]
},
@@ -511,7 +518,7 @@
}
],
"source": [
- "plt.imshow((JuliaImage & 'julia_spec=2').fetch1('image'))\n",
+ "plt.imshow((JuliaImage & \"julia_spec=2\").fetch1(\"image\"))\n",
"plt.axis(False);"
]
},
@@ -583,12 +590,11 @@
" params=null : blob\n",
" \"\"\"\n",
" contents = (\n",
- " (0, 'gaussian', dict(sigma=1.8)),\n",
- " (1, 'median', dict(radius=3)),\n",
- " (2, 'wavelet', {}),\n",
- " (3, 'tv', dict(weight=20.0))\n",
- " )\n",
- "\n"
+ " (0, \"gaussian\", dict(sigma=1.8)),\n",
+ " (1, \"median\", dict(radius=3)),\n",
+ " (2, \"wavelet\", {}),\n",
+ " (3, \"tv\", dict(weight=20.0)),\n",
+ " )"
]
},
{
@@ -598,7 +604,7 @@
"metadata": {},
"outputs": [],
"source": [
- "@schema \n",
+ "@schema\n",
"class JuliaDenoised(dj.Computed):\n",
" definition = \"\"\"\n",
" -> JuliaImage\n",
@@ -608,14 +614,14 @@
" \"\"\"\n",
"\n",
" def make(self, key):\n",
- " img = (JuliaImage & key).fetch1('image')\n",
- " method, params = (DenoiseMethod & key).fetch1('method', 'params')\n",
+ " img = (JuliaImage & key).fetch1(\"image\")\n",
+ " method, params = (DenoiseMethod & key).fetch1(\"method\", \"params\")\n",
"\n",
" if method == \"gaussian\":\n",
" result = filters.gaussian(img, **params)\n",
" elif method == \"median\":\n",
- " result = filters.median(img, disk(params['radius']))\n",
- " elif method == 'tv':\n",
+ " result = filters.median(img, disk(params[\"radius\"]))\n",
+ " elif method == \"tv\":\n",
" result = restoration.denoise_tv_chambolle(img, **params)\n",
" elif method == \"wavelet\":\n",
" result = restoration.denoise_wavelet(noise_img, **params)\n",
@@ -792,8 +798,8 @@
}
],
"source": [
- "keys = JuliaDenoised.fetch('KEY')\n",
- "img = ((JuliaDenoised & keys[21])).fetch1('denoised_image')\n",
+ "keys = JuliaDenoised.fetch(\"KEY\")\n",
+ "img = ((JuliaDenoised & keys[21])).fetch1(\"denoised_image\")\n",
"plt.imshow(img)\n",
"plt.axis(False);"
]
@@ -901,7 +907,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Blob.insert1(dict(id=1, blob=[1, 2, 3, 'Four']))"
+ "Blob.insert1(dict(id=1, blob=[1, 2, 3, \"Four\"]))"
]
},
{
diff --git a/db-course/008-Default.ipynb b/db-course/008-Default.ipynb
index ade0a0c..a69a1ff 100644
--- a/db-course/008-Default.ipynb
+++ b/db-course/008-Default.ipynb
@@ -17,6 +17,7 @@
"source": [
"import datajoint as dj\n",
"import faker\n",
+ "\n",
"fake = faker.Faker()"
]
},
@@ -36,7 +37,7 @@
"metadata": {},
"outputs": [],
"source": [
- "schema = dj.Schema('defaults')"
+ "schema = dj.Schema(\"defaults\")"
]
},
{
@@ -206,8 +207,9 @@
"metadata": {},
"outputs": [],
"source": [
- "import pymysql \n",
- "conn = pymysql.connect(user='root', passwd='simple', host='127.0.0.1')\n",
+ "import pymysql\n",
+ "\n",
+ "conn = pymysql.connect(user=\"root\", passwd=\"simple\", host=\"127.0.0.1\")\n",
"cursor = conn.cursor()"
]
},
@@ -218,7 +220,7 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute('SHOW CREATE TABLE defaults.person')\n",
+ "cursor.execute(\"SHOW CREATE TABLE defaults.person\")\n",
"print(cursor.fetchone()[1])"
]
},
@@ -229,9 +231,11 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"INSERT defaults.person (person_id, blood_group, name) VALUES (2, \"O+\", \"anne\")\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -241,9 +245,11 @@
"metadata": {},
"outputs": [],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"INSERT defaults.person (person_id, blood_group) VALUES (4, \"B+\")\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -273,9 +279,11 @@
}
],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"INSERT defaults.person (person_id, blood_group, name) VALUES (1, NULL, 'bob')\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
diff --git a/db-course/008-Transactions-HW.ipynb b/db-course/008-Transactions-HW.ipynb
index ee82baa..d2b5091 100644
--- a/db-course/008-Transactions-HW.ipynb
+++ b/db-course/008-Transactions-HW.ipynb
@@ -30,6 +30,7 @@
],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"%load_ext sql\n",
"%config SqlMagic.autocommit=True\n",
@@ -211,7 +212,7 @@
"source": [
"import datajoint as dj\n",
"\n",
- "sales = dj.Schema('classicsales')\n",
+ "sales = dj.Schema(\"classicsales\")\n",
"sales.spawn_missing_classes()\n",
"\n",
"dj.Diagram(sales)"
diff --git a/db-course/008-Transactions.ipynb b/db-course/008-Transactions.ipynb
index 61a3d3c..2de6136 100644
--- a/db-course/008-Transactions.ipynb
+++ b/db-course/008-Transactions.ipynb
@@ -24,6 +24,7 @@
"outputs": [],
"source": [
"from faker import Faker\n",
+ "\n",
"fake = Faker()"
]
},
@@ -43,7 +44,8 @@
],
"source": [
"import datajoint as dj\n",
- "schema = dj.Schema('bank')"
+ "\n",
+ "schema = dj.Schema(\"bank\")"
]
},
{
@@ -72,8 +74,10 @@
" dict(\n",
" account_number=fake.random.randint(10_000_000, 99_999_999),\n",
" customer_name=fake.name(),\n",
- " balance=fake.random.randint(0, 100_000_00)/100)\n",
- " for i in range(100))"
+ " balance=fake.random.randint(0, 100_000_00) / 100,\n",
+ " )\n",
+ " for i in range(100)\n",
+ ")"
]
},
{
@@ -213,7 +217,7 @@
"metadata": {},
"outputs": [],
"source": [
- "keys = Account.fetch('KEY')\n",
+ "keys = Account.fetch(\"KEY\")\n",
"account1 = fake.random.choice(keys)\n",
"account2 = fake.random.choice(keys)"
]
@@ -245,17 +249,15 @@
"outputs": [],
"source": [
"def transfer_bad(account1, account2, amount):\n",
- " \n",
- " current_balance = (Account & account1).fetch1('balance')\n",
+ " current_balance = (Account & account1).fetch1(\"balance\")\n",
" if current_balance < amount:\n",
- " raise RuntimeError('Insufficient funds')\n",
+ " raise RuntimeError(\"Insufficient funds\")\n",
"\n",
" Account.update1(dict(account1, balance=float(current_balance) - amount))\n",
"\n",
- " b = (Account & account2).fetch1('balance')\n",
- " \n",
- " Account.update1(dict(account2, balance= float(b) + amount))\n",
- " "
+ " b = (Account & account2).fetch1(\"balance\")\n",
+ "\n",
+ " Account.update1(dict(account2, balance=float(b) + amount))"
]
},
{
@@ -265,20 +267,17 @@
"outputs": [],
"source": [
"def transfer_good(account1, account2, amount):\n",
- " \n",
" conn = dj.conn()\n",
" with conn.transaction:\n",
- "\n",
- " current_balance = (Account & account1).fetch1('balance')\n",
+ " current_balance = (Account & account1).fetch1(\"balance\")\n",
" if current_balance < amount:\n",
- " raise RuntimeError('Insufficient funds')\n",
+ " raise RuntimeError(\"Insufficient funds\")\n",
"\n",
" Account.update1(dict(account1, balance=float(current_balance) - amount))\n",
"\n",
- " b = (Account & account2).fetch1('balance')\n",
- " \n",
- " Account.update1(dict(account2, balance= float(b) + amount))\n",
- " "
+ " b = (Account & account2).fetch1(\"balance\")\n",
+ "\n",
+ " Account.update1(dict(account2, balance=float(b) + amount))"
]
},
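A short usage sketch for the two functions above (amounts are made up; `account1` and `account2` are the keys fetched earlier). With `transfer_good`, any failure inside the `with conn.transaction:` block rolls back both updates, whereas `transfer_bad` can leave the first update committed on its own:

```python
# normal transfer: both balance updates commit together
transfer_good(account1, account2, 10.0)

# an oversized transfer raises before any update, so balances stay consistent
try:
    transfer_good(account1, account2, 10_000_000.0)
except RuntimeError as e:
    print(e)  # Insufficient funds
```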
{
@@ -487,7 +486,7 @@
}
],
"source": [
- "Account & [account1, account2] "
+ "Account & [account1, account2]"
]
},
{
@@ -497,6 +496,7 @@
"outputs": [],
"source": [
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"%load_ext sql\n",
"%config SqlMagic.autocommit=True\n",
@@ -564,7 +564,9 @@
"metadata": {},
"outputs": [],
"source": [
- "conn = pymysql.connect(user='root', host='127.0.0.1', password=\"simple\", autocommit=True)"
+ "conn = pymysql.connect(\n",
+ " user=\"root\", host=\"127.0.0.1\", password=\"simple\", autocommit=True\n",
+ ")"
]
},
{
@@ -585,27 +587,41 @@
],
"source": [
"cursor = conn.cursor()\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
" SELECT balance FROM bank.account \n",
" WHERE account_number = %s\n",
- " \"\"\", (account1[\"account_number\"],))\n",
+ " \"\"\",\n",
+ " (account1[\"account_number\"],),\n",
+ ")\n",
"\n",
"amount = 100\n",
"\n",
"current_balance = cursor.fetchone()\n",
"\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
" UPDATE bank.account \n",
" SET balance = balance - %s\n",
" WHERE account_number = %s\n",
- " \"\"\", (amount, account1[\"account_number\"],))\n",
+ " \"\"\",\n",
+ " (\n",
+ " amount,\n",
+ " account1[\"account_number\"],\n",
+ " ),\n",
+ ")\n",
"\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
" UPDATE bank.account \n",
" SET balance = balance + %s\n",
" WHERE account_number = %s\n",
- " \"\"\", (amount, account2[\"account_number\"],))\n",
- "\n"
+ " \"\"\",\n",
+ " (\n",
+ " amount,\n",
+ " account2[\"account_number\"],\n",
+ " ),\n",
+ ")"
]
},
{
@@ -615,39 +631,52 @@
"outputs": [],
"source": [
"def transfer(cursor, account1, account2, amount):\n",
- " \n",
" cursor.execute(\"BEGIN TRANSACTION\")\n",
- " \n",
+ "\n",
" try:\n",
- " cursor.execute(\"\"\"\n",
+ " cursor.execute(\n",
+ " \"\"\"\n",
" SELECT balance FROM bank.account \n",
" WHERE account_number = %s\n",
- " \"\"\", (account1[\"account_number\"],))\n",
+ " \"\"\",\n",
+ " (account1[\"account_number\"],),\n",
+ " )\n",
"\n",
" current_balance = cursor.fetchone()\n",
"\n",
" if current_balance < amount:\n",
- " raise RuntimeError('Insufficient funds')\n",
+ " raise RuntimeError(\"Insufficient funds\")\n",
"\n",
- " cursor.execute(\"\"\"\n",
+ " cursor.execute(\n",
+ " \"\"\"\n",
" UPDATE shared_bank.account \n",
" SET balance = balance - %s\n",
" WHERE account_number = %s\n",
- " \"\"\", (amount, account1[\"account_number\"],))\n",
+ " \"\"\",\n",
+ " (\n",
+ " amount,\n",
+ " account1[\"account_number\"],\n",
+ " ),\n",
+ " )\n",
"\n",
- " cursor.execute(\"\"\"\n",
+ " cursor.execute(\n",
+ " \"\"\"\n",
" UPDATE shared_bank.account \n",
" SET balance = balance + %s\n",
" WHERE account_number = %s\n",
- " \"\"\", (amount, account2[\"account_number\"],))\n",
- " \n",
- " except: \n",
- " cursor.execute('CANCEL TRANSACTION')\n",
+ " \"\"\",\n",
+ " (\n",
+ " amount,\n",
+ " account2[\"account_number\"],\n",
+ " ),\n",
+ " )\n",
+ "\n",
+ " except:\n",
+ " cursor.execute(\"CANCEL TRANSACTION\")\n",
" raise\n",
- " \n",
+ "\n",
" else:\n",
- " cursor.execute('COMMIT')\n",
- "\n"
+ " cursor.execute(\"COMMIT\")"
]
},
{
@@ -689,9 +718,9 @@
"metadata": {},
"outputs": [],
"source": [
- "schema = dj.schema('dimitri_patterns')\n",
+ "schema = dj.schema(\"dimitri_patterns\")\n",
"schema.drop()\n",
- "schema = dj.schema('dimitri_patterns')"
+ "schema = dj.schema(\"dimitri_patterns\")"
]
},
{
@@ -786,7 +815,8 @@
" ---\n",
" order_date : date\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Shipment2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -795,7 +825,8 @@
" ->[unique] Order2\n",
" ship_date : date\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Confirm2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -830,7 +861,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Order * Shipment * Confirm "
+ "Order * Shipment * Confirm"
]
},
{
@@ -856,8 +887,7 @@
" ---\n",
" species = 'mouse' : enum('human', 'mouse', 'rat', 'worm')\n",
" sex : enum('F', 'M', 'unknown')\n",
- " \"\"\"\n",
- " "
+ " \"\"\""
]
},
{
@@ -875,7 +905,9 @@
"metadata": {},
"outputs": [],
"source": [
- "Subject.insert1(dict(subject_id=1, species=\"human\", sex=\"unknown\"), skip_duplicates=True)\n",
+ "Subject.insert1(\n",
+ " dict(subject_id=1, species=\"human\", sex=\"unknown\"), skip_duplicates=True\n",
+ ")\n",
"Subject.insert1(dict(subject_id=2, species=\"mouse\", sex=\"F\"), skip_duplicates=True)\n",
"Subject.insert1(dict(subject_id=3, species=\"worm\", sex=\"M\"), skip_duplicates=True)"
]
@@ -990,7 +1022,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Cell & (Subject & {'sex': \"M\"})"
+ "Cell & (Subject & {\"sex\": \"M\"})"
]
},
{
@@ -1009,6 +1041,7 @@
" sex : enum('F', 'M', 'unknown')\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Session2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1017,7 +1050,8 @@
" -> Subject2\n",
" session_timestamp = CURRENT_TIMESTAMP : timestamp\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Scan2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1026,7 +1060,8 @@
" -> Session2\n",
" laser_power : float # mW\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Cell2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1072,7 +1107,7 @@
"source": [
"# Give me cells for subject_id=1\n",
"\n",
- "Cell2 & (Scan2 & (Session2 & 'subject_id=2'))"
+ "Cell2 & (Scan2 & (Session2 & \"subject_id=2\"))"
]
},
{
@@ -1139,7 +1174,7 @@
" ---\n",
" method_name : varchar(16)\n",
" \"\"\"\n",
- " \n",
+ "\n",
" contents = ((1, \"sharpen\"), (2, \"contrast\"))"
]
},
@@ -1273,7 +1308,8 @@
" ---\n",
" hire_date : date \n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Instructor(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1281,7 +1317,8 @@
" ---\n",
" department : varchar(30)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Student(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1342,7 +1379,8 @@
" definition = \"\"\"\n",
" neuron : int\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Synapse(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1423,7 +1461,7 @@
" ---\n",
" order_date : date\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Item(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -1463,12 +1501,10 @@
" state : varchar(30)\n",
" \"\"\"\n",
"\n",
- "State.insert((\n",
- " (\"WA\", \"Washington\"),\n",
- " (\"TX\", \"Texas\"),\n",
- " (\"AK\", \"Alaska\"),\n",
- " (\"LA\", \"Louisiana\")\n",
- "))"
+ "\n",
+ "State.insert(\n",
+ " ((\"WA\", \"Washington\"), (\"TX\", \"Texas\"), (\"AK\", \"Alaska\"), (\"LA\", \"Louisiana\"))\n",
+ ")"
]
},
{
@@ -1499,7 +1535,7 @@
"City.insert1((\"WA\", \"Seattle\", None))\n",
"City.insert1((\"TX\", \"Austin\", \"YES\"))\n",
"City.insert1((\"TX\", \"Houston\", None))\n",
- "City.insert1((\"WA\", \"Olympia\", \"YES\"))\n"
+ "City.insert1((\"WA\", \"Olympia\", \"YES\"))"
]
},
{
@@ -1524,14 +1560,16 @@
" ---\n",
" state_name : varchar(30)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class City2(dj.Manual):\n",
" definition = \"\"\"\n",
" -> State2\n",
" city_name : varchar(30)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Capital2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1550,12 +1588,9 @@
"State2.delete_quick()\n",
"City2.delete_quick()\n",
"\n",
- "State2.insert((\n",
- " (\"WA\", \"Washington\"),\n",
- " (\"TX\", \"Texas\"),\n",
- " (\"AK\", \"Alaska\"),\n",
- " (\"LA\", \"Louisiana\")\n",
- "))\n",
+ "State2.insert(\n",
+ " ((\"WA\", \"Washington\"), (\"TX\", \"Texas\"), (\"AK\", \"Alaska\"), (\"LA\", \"Louisiana\"))\n",
+ ")\n",
"\n",
"City2.insert1((\"WA\", \"Seattle\"))\n",
"City2.insert1((\"TX\", \"Austin\"))\n",
@@ -1695,9 +1730,9 @@
"metadata": {},
"outputs": [],
"source": [
- "schema = dj.Schema('shared_vet')\n",
+ "schema = dj.Schema(\"shared_vet\")\n",
"schema.drop()\n",
- "schema = dj.Schema('shared_vet')"
+ "schema = dj.Schema(\"shared_vet\")"
]
},
{
@@ -1726,7 +1761,7 @@
" definition = \"\"\"\n",
" species : varchar(30)\n",
" \"\"\"\n",
- " contents = (('cat',), ('dog',), ('ferret',), ('parrot',))"
+ " contents = ((\"cat\",), (\"dog\",), (\"ferret\",), (\"parrot\",))"
]
},
{
@@ -1863,7 +1898,7 @@
"metadata": {},
"outputs": [],
"source": [
- "@schema \n",
+ "@schema\n",
"class Student(dj.Manual):\n",
" definition = \"\"\"\n",
" student_id : int\n",
diff --git a/db-course/009-DesignPatterns.ipynb b/db-course/009-DesignPatterns.ipynb
index 52ce344..034ea01 100644
--- a/db-course/009-DesignPatterns.ipynb
+++ b/db-course/009-DesignPatterns.ipynb
@@ -47,7 +47,7 @@
}
],
"source": [
- "schema = dj.schema('dimitri_patterns')"
+ "schema = dj.schema(\"dimitri_patterns\")"
]
},
{
@@ -293,7 +293,8 @@
" ---\n",
" order_date : date\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Shipment2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -302,7 +303,8 @@
" ->[unique] Order2\n",
" ship_date : date\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Confirm2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -620,7 +622,7 @@
}
],
"source": [
- "Order * Shipment * Confirm "
+ "Order * Shipment * Confirm"
]
},
{
@@ -646,8 +648,7 @@
" ---\n",
" species = 'mouse' : enum('human', 'mouse', 'rat', 'worm')\n",
" sex : enum('F', 'M', 'unknown')\n",
- " \"\"\"\n",
- " "
+ " \"\"\""
]
},
{
@@ -749,7 +750,9 @@
"metadata": {},
"outputs": [],
"source": [
- "Subject.insert1(dict(subject_id=1, species=\"human\", sex=\"unknown\"), skip_duplicates=True)\n",
+ "Subject.insert1(\n",
+ " dict(subject_id=1, species=\"human\", sex=\"unknown\"), skip_duplicates=True\n",
+ ")\n",
"Subject.insert1(dict(subject_id=2, species=\"mouse\", sex=\"F\"), skip_duplicates=True)\n",
"Subject.insert1(dict(subject_id=3, species=\"worm\", sex=\"M\"), skip_duplicates=True)"
]
@@ -1312,7 +1315,7 @@
}
],
"source": [
- "Cell & (Subject & {'sex': \"M\"})"
+ "Cell & (Subject & {\"sex\": \"M\"})"
]
},
{
@@ -1331,6 +1334,7 @@
" sex : enum('F', 'M', 'unknown')\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Session2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1339,7 +1343,8 @@
" -> Subject2\n",
" session_timestamp = CURRENT_TIMESTAMP : timestamp\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Scan2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1348,7 +1353,8 @@
" -> Session2\n",
" laser_power : float # mW\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Cell2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1656,7 +1662,7 @@
"source": [
"# Give me cells for subject_id=1\n",
"\n",
- "Cell2 & (Scan2 & (Session2 & 'subject_id=2'))"
+ "Cell2 & (Scan2 & (Session2 & \"subject_id=2\"))"
]
},
{
@@ -1723,7 +1729,7 @@
" ---\n",
" method_name : varchar(16)\n",
" \"\"\"\n",
- " \n",
+ "\n",
" contents = ((1, \"sharpen\"), (2, \"contrast\"))"
]
},
@@ -2151,7 +2157,8 @@
" ---\n",
" hire_date : date \n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Instructor(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -2159,7 +2166,8 @@
" ---\n",
" department : varchar(30)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Student(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -2969,7 +2977,8 @@
" definition = \"\"\"\n",
" neuron : int\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Synapse(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -3985,7 +3994,7 @@
" ---\n",
" order_date : date\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Item(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -4025,12 +4034,10 @@
" state : varchar(30)\n",
" \"\"\"\n",
"\n",
- "State.insert((\n",
- " (\"WA\", \"Washington\"),\n",
- " (\"TX\", \"Texas\"),\n",
- " (\"AK\", \"Alaska\"),\n",
- " (\"LA\", \"Louisiana\")\n",
- "))"
+ "\n",
+ "State.insert(\n",
+ " ((\"WA\", \"Washington\"), (\"TX\", \"Texas\"), (\"AK\", \"Alaska\"), (\"LA\", \"Louisiana\"))\n",
+ ")"
]
},
{
@@ -4061,7 +4068,7 @@
"City.insert1((\"WA\", \"Seattle\", None))\n",
"City.insert1((\"TX\", \"Austin\", \"YES\"))\n",
"City.insert1((\"TX\", \"Houston\", None))\n",
- "City.insert1((\"WA\", \"Olympia\", \"YES\"))\n"
+ "City.insert1((\"WA\", \"Olympia\", \"YES\"))"
]
},
{
@@ -4181,14 +4188,16 @@
" ---\n",
" state_name : varchar(30)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class City2(dj.Manual):\n",
" definition = \"\"\"\n",
" -> State2\n",
" city_name : varchar(30)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Capital2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -4207,12 +4216,9 @@
"State2.delete_quick()\n",
"City2.delete_quick()\n",
"\n",
- "State2.insert((\n",
- " (\"WA\", \"Washington\"),\n",
- " (\"TX\", \"Texas\"),\n",
- " (\"AK\", \"Alaska\"),\n",
- " (\"LA\", \"Louisiana\")\n",
- "))\n",
+ "State2.insert(\n",
+ " ((\"WA\", \"Washington\"), (\"TX\", \"Texas\"), (\"AK\", \"Alaska\"), (\"LA\", \"Louisiana\"))\n",
+ ")\n",
"\n",
"City2.insert1((\"WA\", \"Seattle\"))\n",
"City2.insert1((\"TX\", \"Austin\"))\n",
@@ -5059,7 +5065,7 @@
}
],
"source": [
- "schema = dj.Schema('vet')"
+ "schema = dj.Schema(\"vet\")"
]
},
{
@@ -5088,7 +5094,7 @@
" definition = \"\"\"\n",
" species : varchar(30)\n",
" \"\"\"\n",
- " contents = (('cat',), ('dog',), ('ferret',), ('parrot',))"
+ " contents = ((\"cat\",), (\"dog\",), (\"ferret\",), (\"parrot\",))"
]
},
{
@@ -5579,7 +5585,7 @@
"metadata": {},
"outputs": [],
"source": [
- "@schema \n",
+ "@schema\n",
"class Student(dj.Manual):\n",
" definition = \"\"\"\n",
" student_id : int\n",
diff --git a/db-course/009-RelationalDivision.ipynb b/db-course/009-RelationalDivision.ipynb
index 0f160d6..ed3283a 100644
--- a/db-course/009-RelationalDivision.ipynb
+++ b/db-course/009-RelationalDivision.ipynb
@@ -6,16 +6,18 @@
"source": [
"# Relational Division\n",
"\n",
- "Relational division is a query of the type: \n",
- "> \"Find all entries in A that have a mathcing entry in B for each entry in C.\"\n",
+ "Relational division is a query of the type:\n",
+ "\n",
+ "> \"Find all entries in A that have a matching entry in B for each entry in C.\"\n",
+ "\n",
+ "For example,\n",
"\n",
- "For example, \n",
"> \"Show all the job candidates who have all the skills for a job posting.\"\n",
"\n",
"> \"Show all students who have completed all the required courses for a math major.\"\n",
"\n",
- "Relational division is often difficult to think through with no direct syntax in DataJoint and SQL. \n",
- "Let's review a detailed example in both DataJoint and SQL"
+ "Relational division is often difficult to think through with no direct syntax in DataJoint and SQL.\n",
+ "Let's review a detailed example in both DataJoint and SQL\n"
]
},
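The logic is easiest to see first with plain Python sets (toy data, unrelated to the schema defined below): keep every element of A for which no required element of C is missing from B.

```python
# A: job seekers, C: required skills, B: (seeker, skill) pairs they possess
A = {"alice", "bob"}
C = {"SQL", "Python"}
B = {("alice", "SQL"), ("alice", "Python"), ("bob", "SQL")}

# division via double negation: drop seekers missing any required skill
qualified = A - {a for a in A if any((a, c) not in B for c in C)}
print(qualified)  # {'alice'}
```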
{
@@ -27,10 +29,12 @@
"# prepare datajoint\n",
"import datajoint as dj\n",
"from faker import Faker\n",
+ "\n",
"fake = Faker()\n",
"\n",
"# prepare SQL Magic\n",
"import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()\n",
"%load_ext sql\n",
"%config SqlMagic.autocommit=True\n",
@@ -44,8 +48,8 @@
"# Hiring Pipeline Database\n",
"\n",
"In this database, we will represent a set of `Skill`s (e.g. programming languages). \n",
- "We also have a set of job `Seeker`s, each possessing a set of skills. \n",
- "The database also has `Job` postings, each requiring a specific set of skills. \n"
+ "We also have a set of job `Seeker`s, each possessing a set of skills.\n",
+ "The database also has `Job` postings, each requiring a specific set of skills.\n"
]
},
{
@@ -63,7 +67,7 @@
}
],
"source": [
- "schema = dj.Schema('hiring')"
+ "schema = dj.Schema(\"hiring\")"
]
},
{
@@ -77,14 +81,7 @@
" definition = \"\"\"\n",
" skill : varchar(24)\n",
" \"\"\"\n",
- " contents = zip(\n",
- " (\"SQL\", \n",
- " \"Java\", \n",
- " \"Python\", \n",
- " \"C++\", \n",
- " \"JavaScript\", \n",
- " \"R\", \n",
- " \"Rust\"))"
+ " contents = zip((\"SQL\", \"Java\", \"Python\", \"C++\", \"JavaScript\", \"R\", \"Rust\"))"
]
},
{
@@ -195,7 +192,7 @@
" ---\n",
" name : varchar(60)\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Skill(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -209,8 +206,9 @@
"metadata": {},
"outputs": [],
"source": [
- "Seeker.insert(((fake.random_int(), fake.name()) for _ in range(300)), \n",
- " skip_duplicates=True)"
+ "Seeker.insert(\n",
+ " ((fake.random_int(), fake.name()) for _ in range(300)), skip_duplicates=True\n",
+ ")"
]
},
{
@@ -458,7 +456,7 @@
],
"source": [
"# random subsets of skills for each seeker\n",
- "Seeker * Skill & 'rand() < 0.3'"
+ "Seeker * Skill & \"rand() < 0.3\""
]
},
{
@@ -467,9 +465,8 @@
"metadata": {},
"outputs": [],
"source": [
- "# server-side insert \n",
- "Seeker.Skill.insert(\n",
- " Seeker.proj() * Skill & 'RAND() < 0.3')"
+ "# server-side insert\n",
+ "Seeker.Skill.insert(Seeker.proj() * Skill & \"RAND() < 0.3\")"
]
},
{
@@ -735,15 +732,10 @@
"# insert two jobs\n",
"\n",
"Job.insert1((\"job1\", \"Software Engineer I\"))\n",
- "Job.Skill.insert((\n",
- " (\"job1\", \"Rust\"),\n",
- " (\"job1\", \"JavaScript\"),\n",
- " (\"job1\", \"Java\")))\n",
+ "Job.Skill.insert(((\"job1\", \"Rust\"), (\"job1\", \"JavaScript\"), (\"job1\", \"Java\")))\n",
"\n",
"Job.insert1((\"job2\", \"Data Scientist II\"))\n",
- "Job.Skill.insert((\n",
- " (\"job2\", \"SQL\"),\n",
- " (\"job2\", \"Python\")))"
+ "Job.Skill.insert(((\"job2\", \"SQL\"), (\"job2\", \"Python\")))"
]
},
{
@@ -948,7 +940,7 @@
"\n",
"This is described as the relational division of seeker skills by job skills.\n",
"\n",
- "> Show all seekers who have *all* the skills required for job \"Job1\""
+ "> Show all seekers who have _all_ the skills required for job \"Job1\"\n"
]
},
{
@@ -959,7 +951,7 @@
"\n",
"> Show all seekers except those who are missing any of the skills required for Job1.\n",
"\n",
- "This allows us to break the problem into simpler subqueries."
+ "This allows us to break the problem into simpler subqueries.\n"
]
},
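Schematically, the double negation turns into two set differences in DataJoint (a sketch using the `Seeker`, `Job.Skill`, and `Seeker.Skill` tables defined above; the notebook derives the same query step by step below):

```python
# (seeker, required skill) pairs that the seeker does not actually possess
missing = (Seeker.proj() * Job.Skill & {"job": "Job1"}) - Seeker.Skill

# seekers with no missing required skill: the relational division result
qualified = Seeker - missing
```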
{
@@ -1057,7 +1049,7 @@
],
"source": [
"# skills for Job1\n",
- "required_skill = Job.Skill() & {'job': \"Job1\"}\n",
+ "required_skill = Job.Skill() & {\"job\": \"Job1\"}\n",
"required_skill"
]
},
@@ -1204,7 +1196,7 @@
}
],
"source": [
- "# show missing skills for all candidates. \n",
+ "# show missing skills for all candidates.\n",
"\n",
"missing_skill = (Seeker * required_skill) - Seeker.Skill()\n",
"\n",
@@ -1315,7 +1307,7 @@
}
],
"source": [
- "# No show candidates who don't have any missing skills. \n",
+ "# No show candidates who don't have any missing skills.\n",
"\n",
"Seeker - missing_skill.proj()"
]
@@ -1426,7 +1418,7 @@
"source": [
"# putting this all together as a self-contained query:\n",
"# Seekers who have all required skills for Job1.\n",
- "Seeker - ((Seeker.proj() * Job.Skill & {'job': \"Job1\"}) - Seeker.Skill)"
+ "Seeker - ((Seeker.proj() * Job.Skill & {\"job\": \"Job1\"}) - Seeker.Skill)"
]
},
{
@@ -1435,9 +1427,9 @@
"source": [
"# In SQL\n",
"\n",
- "Let's do the same in sql. \n",
+ "Let's do the same in sql.\n",
"\n",
- "Query 1: show all seekers' missing skills for Job1."
+ "Query 1: show all seekers' missing skills for Job1.\n"
]
},
{
@@ -1563,7 +1555,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Now show all the seekers who lack missing skills:"
+ "Now show all the seekers who lack missing skills:\n"
]
},
{
diff --git a/db-course/Default.ipynb b/db-course/Default.ipynb
index b9f4279..4f45a3b 100644
--- a/db-course/Default.ipynb
+++ b/db-course/Default.ipynb
@@ -18,6 +18,7 @@
"import datajoint as dj\n",
"import random\n",
"import faker\n",
+ "\n",
"fake = faker.Faker()"
]
},
@@ -28,7 +29,7 @@
"metadata": {},
"outputs": [],
"source": [
- "schema = dj.schema('dimitri_default')"
+ "schema = dj.schema(\"dimitri_default\")"
]
},
{
@@ -338,7 +339,7 @@
"metadata": {},
"outputs": [],
"source": [
- "import pymysql "
+ "import pymysql"
]
},
{
@@ -348,7 +349,9 @@
"metadata": {},
"outputs": [],
"source": [
- "conn = pymysql.connect(user='dimitri', passwd=dj.config['database.password'], host='db.ust-db.link')"
+ "conn = pymysql.connect(\n",
+ " user=\"dimitri\", passwd=dj.config[\"database.password\"], host=\"db.ust-db.link\"\n",
+ ")"
]
},
{
@@ -379,14 +382,16 @@
}
],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"CREATE TABLE dimitri_default.person2 (\n",
" person_id int NOT NULL,\n",
" blood_group enum('A+','A-','AB+','AB-','B+','B-','O+','O-', 'unknown') \n",
" NOT NULL DEFAULT 'unknown',\n",
" PRIMARY KEY(person_id)\n",
")\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -407,9 +412,11 @@
}
],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"INSERT dimitri_default.person2 (person_id, blood_group) VALUES (1, DEFAULT)\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -430,9 +437,11 @@
}
],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"INSERT dimitri_default.person2 (person_id, blood_group) VALUES (2, \"O+\")\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
@@ -462,9 +471,11 @@
}
],
"source": [
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
"INSERT dimitri_default.person2 (person_id, blood_group) VALUES (3, NULL)\n",
- "\"\"\")"
+ "\"\"\"\n",
+ ")"
]
},
{
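The three inserts above exercise the `DEFAULT` clause: the first row stores the declared default 'unknown', the second an explicit value, and the explicit `NULL` should be rejected because `blood_group` is declared `NOT NULL` — omitting a column and inserting NULL into it are not the same thing. A quick check, assuming the `cursor` from the cells above:

```python
# Expect ((1, 'unknown'), (2, 'O+')); the NULL insert raises an IntegrityError
# along the lines of "Column 'blood_group' cannot be null".
cursor.execute(
    "SELECT person_id, blood_group FROM dimitri_default.person2 ORDER BY person_id"
)
print(cursor.fetchall())
```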
@@ -586,11 +597,8 @@
" ---\n",
" dept_name : varchar(30)\n",
" \"\"\"\n",
- " \n",
- " contents = [\n",
- " ('BIOL', \"Biology\"),\n",
- " ('MATH', \"Mathematics\") \n",
- " ]"
+ "\n",
+ " contents = [(\"BIOL\", \"Biology\"), (\"MATH\", \"Mathematics\")]"
]
},
{
@@ -606,8 +614,7 @@
" -> Person\n",
" ---\n",
" -> [nullable] Department\n",
- " \"\"\"\n",
- " "
+ " \"\"\""
]
},
{
diff --git a/db-course/DesignPatterns.ipynb b/db-course/DesignPatterns.ipynb
index 86c8590..eff7831 100644
--- a/db-course/DesignPatterns.ipynb
+++ b/db-course/DesignPatterns.ipynb
@@ -7,12 +7,14 @@
"outputs": [],
"source": [
"import json\n",
- "with open('cred.json') as f:\n",
+ "\n",
+ "with open(\"cred.json\") as f:\n",
" creds = json.load(f)\n",
"\n",
"connection_string = \"mysql://{user}:{password}@{host}\".format(**creds)\n",
"\n",
- "import pymysql \n",
+ "import pymysql\n",
+ "\n",
"pymysql.install_as_MySQLdb()"
]
},
@@ -80,9 +82,10 @@
],
"source": [
"import datajoint as dj\n",
- "schema = dj.Schema('dimitri_bank')\n",
+ "\n",
+ "schema = dj.Schema(\"dimitri_bank\")\n",
"schema.drop()\n",
- "schema = dj.Schema('dimitri_bank')"
+ "schema = dj.Schema(\"dimitri_bank\")"
]
},
{
@@ -111,8 +114,10 @@
" dict(\n",
" account_number=fake.random.randint(10_000_000, 99_999_999),\n",
" customer_name=fake.name(),\n",
- " balance=fake.random.randint(0, 100_000_00)/100)\n",
- " for i in range(100))"
+ " balance=fake.random.randint(0, 100_000_00) / 100,\n",
+ " )\n",
+ " for i in range(100)\n",
+ ")"
]
},
{
@@ -250,7 +255,7 @@
"metadata": {},
"outputs": [],
"source": [
- "keys =Account.fetch('KEY')\n",
+ "keys = Account.fetch(\"KEY\")\n",
"account1 = fake.random.choice(keys)\n",
"account2 = fake.random.choice(keys)"
]
@@ -282,20 +287,17 @@
"outputs": [],
"source": [
"def transfer(account1, account2, amount):\n",
- " \n",
" with Account.connection.transaction:\n",
- "\n",
- " current_balance = (Account & account1).fetch1('balance')\n",
+ " current_balance = (Account & account1).fetch1(\"balance\")\n",
" if current_balance < amount:\n",
- " raise RuntimeError('Insufficient funds')\n",
+ " raise RuntimeError(\"Insufficient funds\")\n",
"\n",
" Account.update1(dict(account1, balance=float(current_balance) - amount))\n",
"\n",
- " b = (Account & account2).fetch1('balance')\n",
+ " b = (Account & account2).fetch1(\"balance\")\n",
" assert False\n",
- " \n",
- " Account.update1(dict(account2, balance= float(b) + amount))\n",
- " "
+ "\n",
+ " Account.update1(dict(account2, balance=float(b) + amount))"
]
},
{
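The `assert False` in `transfer` above looks deliberate: it aborts the function between the two `update1` calls, which lets the notebook demonstrate that `Account.connection.transaction` rolls the partial update back. A usage sketch:

```python
# The failed transfer must leave account1 untouched: the transaction context
# manager rolls back everything done inside the `with` block on any exception.
before = (Account & account1).fetch1("balance")
try:
    transfer(account1, account2, 10.00)
except (AssertionError, RuntimeError):
    pass  # the deliberate `assert False` (or the funds check) fired mid-transaction
assert (Account & account1).fetch1("balance") == before
```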
@@ -513,7 +515,7 @@
}
],
"source": [
- "Account & [account1, account2] "
+ "Account & [account1, account2]"
]
},
{
@@ -1259,7 +1261,8 @@
"outputs": [],
"source": [
"import json\n",
- "with open('cred.json') as f:\n",
+ "\n",
+ "with open(\"cred.json\") as f:\n",
" creds = json.load(f)"
]
},
@@ -1310,27 +1313,41 @@
],
"source": [
"cursor = conn.cursor()\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
" SELECT balance FROM dimitri_bank.account \n",
" WHERE account_number = %s\n",
- " \"\"\", (account1[\"account_number\"],))\n",
+ " \"\"\",\n",
+ " (account1[\"account_number\"],),\n",
+ ")\n",
"\n",
"amount = 100\n",
"\n",
"current_balance = cursor.fetchone()\n",
"\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
" UPDATE dimitri_bank.account \n",
" SET balance = balance - %s\n",
" WHERE account_number = %s\n",
- " \"\"\", (amount, account1[\"account_number\"],))\n",
+ " \"\"\",\n",
+ " (\n",
+ " amount,\n",
+ " account1[\"account_number\"],\n",
+ " ),\n",
+ ")\n",
"\n",
- "cursor.execute(\"\"\"\n",
+ "cursor.execute(\n",
+ " \"\"\"\n",
" UPDATE dimitri_bank.account \n",
" SET balance = balance + %s\n",
" WHERE account_number = %s\n",
- " \"\"\", (amount, account2[\"account_number\"],))\n",
- "\n"
+ " \"\"\",\n",
+ " (\n",
+ " amount,\n",
+ " account2[\"account_number\"],\n",
+ " ),\n",
+ ")"
]
},
{
@@ -1340,39 +1357,52 @@
"outputs": [],
"source": [
"def transfer(cursor, account1, account2, amount):\n",
- " \n",
" cursor.execute(\"BEGIN TRANSACTION\")\n",
- " \n",
+ "\n",
" try:\n",
- " cursor.execute(\"\"\"\n",
+ " cursor.execute(\n",
+ " \"\"\"\n",
" SELECT balance FROM shared_bank.account \n",
" WHERE account_number = %s\n",
- " \"\"\", (account1[\"account_number\"],))\n",
+ " \"\"\",\n",
+ " (account1[\"account_number\"],),\n",
+ " )\n",
"\n",
" current_balance = cursor.fetchone()\n",
"\n",
" if current_balance < amount:\n",
- " raise RuntimeError('Insufficient funds')\n",
+ " raise RuntimeError(\"Insufficient funds\")\n",
"\n",
- " cursor.execute(\"\"\"\n",
+ " cursor.execute(\n",
+ " \"\"\"\n",
" UPDATE shared_bank.account \n",
" SET balance = balance - %s\n",
" WHERE account_number = %s\n",
- " \"\"\", (amount, account1[\"account_number\"],))\n",
+ " \"\"\",\n",
+ " (\n",
+ " amount,\n",
+ " account1[\"account_number\"],\n",
+ " ),\n",
+ " )\n",
"\n",
- " cursor.execute(\"\"\"\n",
+ " cursor.execute(\n",
+ " \"\"\"\n",
" UPDATE shared_bank.account \n",
" SET balance = balance + %s\n",
" WHERE account_number = %s\n",
- " \"\"\", (amount, account2[\"account_number\"],))\n",
- " \n",
- " except: \n",
- " cursor.execute('CANCEL TRANSACTION')\n",
+ " \"\"\",\n",
+ " (\n",
+ " amount,\n",
+ " account2[\"account_number\"],\n",
+ " ),\n",
+ " )\n",
+ "\n",
+ " except:\n",
+ " cursor.execute(\"CANCEL TRANSACTION\")\n",
" raise\n",
- " \n",
+ "\n",
" else:\n",
- " cursor.execute('COMMIT')\n",
- "\n"
+ " cursor.execute(\"COMMIT\")"
]
},
{
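Two details of this function are easy to trip over: `cursor.fetchone()` returns a row tuple, so the balance must be unpacked before it is compared with `amount`, and pymysql exposes transactions as connection methods (`begin`, `commit`, `rollback`), which avoids hand-writing the statements. A sketch of the same transfer with both handled:

```python
import pymysql


def transfer(conn, account1, account2, amount):
    """Move `amount` between two accounts atomically (a sketch)."""
    cursor = conn.cursor()
    conn.begin()  # issues START TRANSACTION
    try:
        cursor.execute(
            "SELECT balance FROM shared_bank.account WHERE account_number = %s",
            (account1["account_number"],),
        )
        (balance,) = cursor.fetchone()  # fetchone() returns a tuple -- unpack it
        if balance < amount:
            raise RuntimeError("Insufficient funds")
        cursor.execute(
            "UPDATE shared_bank.account SET balance = balance - %s "
            "WHERE account_number = %s",
            (amount, account1["account_number"]),
        )
        cursor.execute(
            "UPDATE shared_bank.account SET balance = balance + %s "
            "WHERE account_number = %s",
            (amount, account2["account_number"]),
        )
    except Exception:
        conn.rollback()  # undo both updates
        raise
    else:
        conn.commit()
```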
@@ -1422,9 +1452,9 @@
}
],
"source": [
- "schema = dj.schema('dimitri_patterns')\n",
+ "schema = dj.schema(\"dimitri_patterns\")\n",
"schema.drop()\n",
- "schema = dj.schema('dimitri_patterns')"
+ "schema = dj.schema(\"dimitri_patterns\")"
]
},
{
@@ -1670,7 +1700,8 @@
" ---\n",
" order_date : date\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Shipment2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1679,7 +1710,8 @@
" ->[unique] Order2\n",
" ship_date : date\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Confirm2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1997,7 +2029,7 @@
}
],
"source": [
- "Order * Shipment * Confirm "
+ "Order * Shipment * Confirm"
]
},
{
@@ -2023,8 +2055,7 @@
" ---\n",
" species = 'mouse' : enum('human', 'mouse', 'rat', 'worm')\n",
" sex : enum('F', 'M', 'unknown')\n",
- " \"\"\"\n",
- " "
+ " \"\"\""
]
},
{
@@ -2126,7 +2157,9 @@
"metadata": {},
"outputs": [],
"source": [
- "Subject.insert1(dict(subject_id=1, species=\"human\", sex=\"unknown\"), skip_duplicates=True)\n",
+ "Subject.insert1(\n",
+ " dict(subject_id=1, species=\"human\", sex=\"unknown\"), skip_duplicates=True\n",
+ ")\n",
"Subject.insert1(dict(subject_id=2, species=\"mouse\", sex=\"F\"), skip_duplicates=True)\n",
"Subject.insert1(dict(subject_id=3, species=\"worm\", sex=\"M\"), skip_duplicates=True)"
]
@@ -2689,7 +2722,7 @@
}
],
"source": [
- "Cell & (Subject & {'sex': \"M\"})"
+ "Cell & (Subject & {\"sex\": \"M\"})"
]
},
{
@@ -2708,6 +2741,7 @@
" sex : enum('F', 'M', 'unknown')\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Session2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -2716,7 +2750,8 @@
" -> Subject2\n",
" session_timestamp = CURRENT_TIMESTAMP : timestamp\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Scan2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -2725,7 +2760,8 @@
" -> Session2\n",
" laser_power : float # mW\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Cell2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -3033,7 +3069,7 @@
"source": [
"# Give me cells for subject_id=1\n",
"\n",
- "Cell2 & (Scan2 & (Session2 & 'subject_id=2'))"
+ "Cell2 & (Scan2 & (Session2 & \"subject_id=2\"))"
]
},
{
@@ -3100,7 +3136,7 @@
" ---\n",
" method_name : varchar(16)\n",
" \"\"\"\n",
- " \n",
+ "\n",
" contents = ((1, \"sharpen\"), (2, \"contrast\"))"
]
},
@@ -3528,7 +3564,8 @@
" ---\n",
" hire_date : date \n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Instructor(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -3536,7 +3573,8 @@
" ---\n",
" department : varchar(30)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Student(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -4346,7 +4384,8 @@
" definition = \"\"\"\n",
" neuron : int\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Synapse(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -5362,7 +5401,7 @@
" ---\n",
" order_date : date\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Item(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -5402,12 +5441,10 @@
" state : varchar(30)\n",
" \"\"\"\n",
"\n",
- "State.insert((\n",
- " (\"WA\", \"Washington\"),\n",
- " (\"TX\", \"Texas\"),\n",
- " (\"AK\", \"Alaska\"),\n",
- " (\"LA\", \"Louisiana\")\n",
- "))"
+ "\n",
+ "State.insert(\n",
+ " ((\"WA\", \"Washington\"), (\"TX\", \"Texas\"), (\"AK\", \"Alaska\"), (\"LA\", \"Louisiana\"))\n",
+ ")"
]
},
{
@@ -5438,7 +5475,7 @@
"City.insert1((\"WA\", \"Seattle\", None))\n",
"City.insert1((\"TX\", \"Austin\", \"YES\"))\n",
"City.insert1((\"TX\", \"Houston\", None))\n",
- "City.insert1((\"WA\", \"Olympia\", \"YES\"))\n"
+ "City.insert1((\"WA\", \"Olympia\", \"YES\"))"
]
},
{
@@ -5558,14 +5595,16 @@
" ---\n",
" state_name : varchar(30)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class City2(dj.Manual):\n",
" definition = \"\"\"\n",
" -> State2\n",
" city_name : varchar(30)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Capital2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -5584,12 +5623,9 @@
"State2.delete_quick()\n",
"City2.delete_quick()\n",
"\n",
- "State2.insert((\n",
- " (\"WA\", \"Washington\"),\n",
- " (\"TX\", \"Texas\"),\n",
- " (\"AK\", \"Alaska\"),\n",
- " (\"LA\", \"Louisiana\")\n",
- "))\n",
+ "State2.insert(\n",
+ " ((\"WA\", \"Washington\"), (\"TX\", \"Texas\"), (\"AK\", \"Alaska\"), (\"LA\", \"Louisiana\"))\n",
+ ")\n",
"\n",
"City2.insert1((\"WA\", \"Seattle\"))\n",
"City2.insert1((\"TX\", \"Austin\"))\n",
@@ -6434,9 +6470,9 @@
}
],
"source": [
- "schema = dj.Schema('shared_vet')\n",
+ "schema = dj.Schema(\"shared_vet\")\n",
"schema.drop()\n",
- "schema = dj.Schema('shared_vet')"
+ "schema = dj.Schema(\"shared_vet\")"
]
},
{
@@ -6465,7 +6501,7 @@
" definition = \"\"\"\n",
" species : varchar(30)\n",
" \"\"\"\n",
- " contents = (('cat',), ('dog',), ('ferret',), ('parrot',))"
+ " contents = ((\"cat\",), (\"dog\",), (\"ferret\",), (\"parrot\",))"
]
},
{
@@ -6956,7 +6992,7 @@
"metadata": {},
"outputs": [],
"source": [
- "@schema \n",
+ "@schema\n",
"class Student(dj.Manual):\n",
" definition = \"\"\"\n",
" student_id : int\n",
diff --git a/db-course/PersonAccount.ipynb b/db-course/PersonAccount.ipynb
index 06cb866..6a0b1e7 100644
--- a/db-course/PersonAccount.ipynb
+++ b/db-course/PersonAccount.ipynb
@@ -42,7 +42,7 @@
}
],
"source": [
- "schema = dj.Schema('dimitri_bank')"
+ "schema = dj.Schema(\"dimitri_bank\")"
]
},
{
@@ -59,8 +59,7 @@
" date_of_birth : date\n",
" full_name : varchar(30)\n",
" ssn : int\n",
- " \"\"\"\n",
- " "
+ " \"\"\""
]
},
{
@@ -70,6 +69,7 @@
"outputs": [],
"source": [
"import faker\n",
+ "\n",
"fake = faker.Faker()"
]
},
@@ -90,11 +90,12 @@
"source": [
"Person.insert(\n",
" dict(\n",
- " person_id = random.randint(100_000_000, 999_999_999),\n",
- " date_of_birth = fake.date_of_birth(),\n",
- " full_name = fake.name(),\n",
- " ssn = random.randint(100_00_0000, 999_99_9999)\n",
- " ) for _ in range(3000)\n",
+ " person_id=random.randint(100_000_000, 999_999_999),\n",
+ " date_of_birth=fake.date_of_birth(),\n",
+ " full_name=fake.name(),\n",
+ " ssn=random.randint(100_00_0000, 999_99_9999),\n",
+ " )\n",
+ " for _ in range(3000)\n",
")"
]
},
@@ -263,7 +264,7 @@
"metadata": {},
"outputs": [],
"source": [
- "keys = Person.fetch(\"KEY\")\n"
+ "keys = Person.fetch(\"KEY\")"
]
},
{
@@ -312,8 +313,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Account.insert(\n",
- " dict(random.choice(keys), account=i) for i in range(300, 600))"
+ "Account.insert(dict(random.choice(keys), account=i) for i in range(300, 600))"
]
},
{
@@ -520,7 +520,8 @@
" full_name : varchar(30)\n",
" ssn : int\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Account2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -634,7 +635,8 @@
" full_name : varchar(30)\n",
" ssn : int\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Account3(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -807,7 +809,8 @@
" full_name : varchar(30)\n",
" ssn : int\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Account4(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -815,7 +818,8 @@
" --- \n",
" balance : decimal(10, 2)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class AccountPerson4(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -832,11 +836,12 @@
"source": [
"Person4.insert(\n",
" dict(\n",
- " person_id = random.randint(100_000_000, 999_999_999),\n",
- " date_of_birth = fake.date_of_birth(),\n",
- " full_name = fake.name(),\n",
- " ssn = random.randint(100_00_0000, 999_99_9999)\n",
- " ) for _ in range(3000)\n",
+ " person_id=random.randint(100_000_000, 999_999_999),\n",
+ " date_of_birth=fake.date_of_birth(),\n",
+ " full_name=fake.name(),\n",
+ " ssn=random.randint(100_00_0000, 999_99_9999),\n",
+ " )\n",
+ " for _ in range(3000)\n",
")"
]
},
@@ -848,9 +853,10 @@
"source": [
"Account4.insert(\n",
" dict(\n",
- " account = random.randint(100_000_000, 999_999_999),\n",
- " balance = random.randint(100_000_000, 999_999_999) / 100\n",
- " ) for _ in range(3000)\n",
+ " account=random.randint(100_000_000, 999_999_999),\n",
+ " balance=random.randint(100_000_000, 999_999_999) / 100,\n",
+ " )\n",
+ " for _ in range(3000)\n",
")"
]
},
@@ -860,8 +866,8 @@
"metadata": {},
"outputs": [],
"source": [
- "person_keys = Person4.fetch('KEY')\n",
- "account_keys = Account4.fetch('KEY')"
+ "person_keys = Person4.fetch(\"KEY\")\n",
+ "account_keys = Account4.fetch(\"KEY\")"
]
},
{
@@ -891,9 +897,11 @@
"outputs": [],
"source": [
"AccountPerson4.insert(\n",
- " ({**random.choice(person_keys), **random.choice(account_keys)}\n",
- " for _ in range(4000)),\n",
- " skip_duplicates=True\n",
+ " (\n",
+ " {**random.choice(person_keys), **random.choice(account_keys)}\n",
+ " for _ in range(4000)\n",
+ " ),\n",
+ " skip_duplicates=True,\n",
")"
]
},
@@ -1194,7 +1202,8 @@
" full_name : varchar(30)\n",
" ssn : int\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Account5(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1202,7 +1211,8 @@
" --- \n",
" balance : decimal(10, 2)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class AccountPerson5(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -1467,7 +1477,8 @@
" ssn = null : int\n",
" unique index (ssn)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Account6(dj.Manual):\n",
" definition = \"\"\"\n",
diff --git a/db-course/QuizSystem.ipynb b/db-course/QuizSystem.ipynb
index 6259eaf..bf343bf 100644
--- a/db-course/QuizSystem.ipynb
+++ b/db-course/QuizSystem.ipynb
@@ -26,7 +26,7 @@
}
],
"source": [
- "schema = dj.Schema('quiz')"
+ "schema = dj.Schema(\"quiz\")"
]
},
{
@@ -333,13 +333,14 @@
" ---\n",
" subject_name : varchar(60)\n",
" \"\"\"\n",
- " \n",
+ "\n",
" contents = [\n",
- " (\"DaSy\", \"Database Systems\" ), \n",
- " (\"ML\", \"Machine Learning\" ),\n",
- " (\"SciViz\", \"Scientific Visualization\")\n",
+ " (\"DaSy\", \"Database Systems\"),\n",
+ " (\"ML\", \"Machine Learning\"),\n",
+ " (\"SciViz\", \"Scientific Visualization\"),\n",
" ]\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class User2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -349,7 +350,8 @@
" last_name : varchar(60)\n",
" birthday : date \n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Question2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -358,7 +360,8 @@
" ---\n",
" question : varchar(2000)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Answer2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -369,7 +372,8 @@
" correct = null : enum('YES')\n",
" unique index(subject_id, question_id, correct)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Quiz2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -379,25 +383,27 @@
" ---\n",
" date : date\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Question(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
" -> Question2\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Submission2(dj.Manual):\n",
" definition = \"\"\"\n",
" -> Quiz2\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Answer(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
" -> Answer2\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Result2(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -825,7 +831,7 @@
"text/plain": [
"*subject_id subject_name \n",
"+------------+ +------------+\n",
- "DaSy Database Syste\n",
+ "DaSy Database System\n",
"ML Machine Learni\n",
"SciViz Scientific Vis\n",
" (Total: 3)"
diff --git a/short_tutorials/DataJoint in 30mins.ipynb b/short_tutorials/DataJoint in 30mins.ipynb
index b940f48..3a0f13f 100644
--- a/short_tutorials/DataJoint in 30mins.ipynb
+++ b/short_tutorials/DataJoint in 30mins.ipynb
@@ -246,7 +246,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Subject.insert1({'subject_name': 'bob', 'sex': 'M'})"
+ "Subject.insert1({\"subject_name\": \"bob\", \"sex\": \"M\"})"
]
},
{
@@ -351,8 +351,8 @@
"outputs": [],
"source": [
"subject_list = [\n",
- " {'subject_name': 'alice', 'sex': 'F'},\n",
- " {'subject_name': 'anne', 'sex': 'F'}\n",
+ " {\"subject_name\": \"alice\", \"sex\": \"F\"},\n",
+ " {\"subject_name\": \"anne\", \"sex\": \"F\"},\n",
"]\n",
"Subject.insert(subject_list)"
]
@@ -463,8 +463,8 @@
"outputs": [],
"source": [
"session_list = [\n",
- " {'subject_name': 'bob', 'session_id': 1},\n",
- " {'subject_name': 'bob', 'session_id': 2}\n",
+ " {\"subject_name\": \"bob\", \"session_id\": 1},\n",
+ " {\"subject_name\": \"bob\", \"session_id\": 2},\n",
"]\n",
"Session.insert(session_list)"
]
@@ -587,7 +587,7 @@
" param_id: int\n",
" ---\n",
" multiplier: int\n",
- " \"\"\" "
+ " \"\"\""
]
},
{
@@ -605,20 +605,22 @@
" ---\n",
" result: longblob\n",
" \"\"\"\n",
- " \n",
+ "\n",
" def make(self, key):\n",
" # print status\n",
- " print(f\"Working on session_id {key['session_id']} and param_id {key['param_id']}\")\n",
- " \n",
+ " print(\n",
+ " f\"Working on session_id {key['session_id']} and param_id {key['param_id']}\"\n",
+ " )\n",
+ "\n",
" # retrieve data from upstream\n",
- " session_id = (Session & key).fetch1('session_id')\n",
- " multiplier_val = (Param & key).fetch1('multiplier')\n",
- " \n",
+ " session_id = (Session & key).fetch1(\"session_id\")\n",
+ " multiplier_val = (Param & key).fetch1(\"multiplier\")\n",
+ "\n",
" # perform computation\n",
" result = np.ones(10) * session_id * multiplier_val\n",
- " \n",
+ "\n",
" # store the results\n",
- " key['result'] = result\n",
+ " key[\"result\"] = result\n",
" self.insert1(key)"
]
},
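With `make` defined, the table fills itself: `populate` finds every combination of upstream primary keys (here, Session × Param) not yet present in `Analysis` and calls `make` once per missing key, so re-running it computes only what is new. A sketch:

```python
# Compute all pending (subject_name, session_id, param_id) combinations.
# display_progress is optional; it prints a progress bar while populating.
Analysis.populate(display_progress=True)
```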
@@ -811,10 +813,7 @@
"metadata": {},
"outputs": [],
"source": [
- "param_list = [\n",
- " {'param_id': 0, 'multiplier': 1},\n",
- " {'param_id': 1, 'multiplier': 16}\n",
- "]\n",
+ "param_list = [{\"param_id\": 0, \"multiplier\": 1}, {\"param_id\": 1, \"multiplier\": 16}]\n",
"Param.insert(param_list)"
]
},
@@ -1176,8 +1175,8 @@
"outputs": [],
"source": [
"session_list = [\n",
- " {'subject_name': 'alice', 'session_id': 1},\n",
- " {'subject_name': 'anne', 'session_id': 2}\n",
+ " {\"subject_name\": \"alice\", \"session_id\": 1},\n",
+ " {\"subject_name\": \"anne\", \"session_id\": 2},\n",
"]\n",
"Session.insert(session_list)"
]
@@ -1643,7 +1642,7 @@
}
],
"source": [
- "Session & 'session_id = 1'"
+ "Session & \"session_id = 1\""
]
},
{
@@ -1737,7 +1736,7 @@
}
],
"source": [
- "Session & 'subject_name = \"bob\"' & 'session_id = 1'"
+ "Session & 'subject_name = \"bob\"' & \"session_id = 1\""
]
},
{
@@ -1835,7 +1834,7 @@
}
],
"source": [
- "Session * Subject & 'subject_name = \"bob\"' & 'session_id = 1'"
+ "Session * Subject & 'subject_name = \"bob\"' & \"session_id = 1\""
]
},
{
@@ -2095,7 +2094,7 @@
}
],
"source": [
- "Analysis * Param & 'param_id = 1'"
+ "Analysis * Param & \"param_id = 1\""
]
},
{
@@ -2206,7 +2205,7 @@
}
],
"source": [
- "Analysis * Param & 'param_id = 1' & 'subject_name != \"bob\"'"
+ "Analysis * Param & \"param_id = 1\" & 'subject_name != \"bob\"'"
]
},
{
@@ -2216,7 +2215,7 @@
"metadata": {},
"outputs": [],
"source": [
- "query = Analysis * Param & 'param_id = 1' & 'subject_name != \"bob\"'"
+ "query = Analysis * Param & \"param_id = 1\" & 'subject_name != \"bob\"'"
]
},
{
@@ -2349,7 +2348,7 @@
}
],
"source": [
- "query.fetch(format='frame') # returns as pandas dataframe"
+ "query.fetch(format=\"frame\") # returns as pandas dataframe"
]
},
{
@@ -2372,7 +2371,7 @@
}
],
"source": [
- "query.fetch('result')"
+ "query.fetch(\"result\")"
]
},
{
diff --git a/short_tutorials/JuliaSets.ipynb b/short_tutorials/JuliaSets.ipynb
index 9788b62..f792e68 100644
--- a/short_tutorials/JuliaSets.ipynb
+++ b/short_tutorials/JuliaSets.ipynb
@@ -32,8 +32,8 @@
"source": [
"def julia(c, size=256, center=(0.0, 0.0), zoom=1.0, iters=256):\n",
" x, y = np.meshgrid(\n",
- " np.linspace(-1, 1, size)/zoom + center[0], \n",
- " np.linspace(-1, 1, size)/zoom + center[1], \n",
+ " np.linspace(-1, 1, size) / zoom + center[0],\n",
+ " np.linspace(-1, 1, size) / zoom + center[1],\n",
" )\n",
" z = x + 1j * y\n",
" im = np.zeros(z.shape)\n",
@@ -63,7 +63,7 @@
}
],
"source": [
- "plt.imshow(julia(-0.4+0.6j), cmap='magma')\n",
+ "plt.imshow(julia(-0.4 + 0.6j), cmap=\"magma\")\n",
"plt.axis(False);"
]
},
@@ -85,7 +85,7 @@
}
],
"source": [
- "plt.imshow(julia(-0.4+0.6j, center=(0.34, -0.30), zoom=10000.0), cmap='magma')\n",
+ "plt.imshow(julia(-0.4 + 0.6j, center=(0.34, -0.30), zoom=10000.0), cmap=\"magma\")\n",
"plt.axis(False);"
]
},
@@ -97,9 +97,9 @@
"outputs": [],
"source": [
"c = (\n",
- " -0.4 + 0.6j, \n",
- " -0.74543 + 0.11301j, \n",
- " -0.75 + 0.11j, \n",
+ " -0.4 + 0.6j,\n",
+ " -0.74543 + 0.11301j,\n",
+ " -0.75 + 0.11j,\n",
" -0.1 + 0.651j,\n",
" -0.835 - 0.2321j,\n",
" -0.70176 - 0.3842j,\n",
@@ -128,9 +128,9 @@
"\n",
"fig, ax = plt.subplots(3, 2, figsize=(7.5, 12))\n",
"for c_, a in zip(c, ax.flatten()):\n",
- " img = julia(c_, zoom=0.5) \n",
+ " img = julia(c_, zoom=0.5)\n",
" img += np.random.randn(*img.shape) * noise_level\n",
- " a.imshow(img, cmap='magma')\n",
+ " a.imshow(img, cmap=\"magma\")\n",
" a.axis(False)"
]
},
@@ -172,7 +172,7 @@
"outputs": [],
"source": [
"noise_level = 50.0\n",
- "img = julia(-0.4+0.6j, size=200)\n",
+ "img = julia(-0.4 + 0.6j, size=200)\n",
"noise_img = img + np.random.randn(*img.shape) * noise_level\n",
"median_img = filters.median(noise_img, disk(3))\n",
"tv_img = restoration.denoise_tv_chambolle(noise_img, weight=20.0)\n",
@@ -203,13 +203,16 @@
"fig, ax = plt.subplots(3, 2, figsize=(6, 9))\n",
"for a, (im, title) in zip(\n",
" ax.flatten(),\n",
- " ((img, 'original'), \n",
- " (noise_img, 'original+noise'),\n",
- " (gaussian_img, 'gaussian'),\n",
- " (median_img, 'median'), \n",
- " (wavelet_img, 'wavelet'),\n",
- " (tv_img, 'tv'), )):\n",
- " a.imshow(im, cmap='magma', vmin=0, vmax=255)\n",
+ " (\n",
+ " (img, \"original\"),\n",
+ " (noise_img, \"original+noise\"),\n",
+ " (gaussian_img, \"gaussian\"),\n",
+ " (median_img, \"median\"),\n",
+ " (wavelet_img, \"wavelet\"),\n",
+ " (tv_img, \"tv\"),\n",
+ " ),\n",
+ "):\n",
+ " a.imshow(im, cmap=\"magma\", vmin=0, vmax=255)\n",
" a.axis(False)\n",
" a.set_title(title)"
]
@@ -242,7 +245,7 @@
"source": [
"import datajoint as dj\n",
"\n",
- "schema = dj.Schema('julia')"
+ "schema = dj.Schema(\"julia\")"
]
},
{
@@ -260,7 +263,7 @@
"metadata": {},
"outputs": [],
"source": [
- "@schema \n",
+ "@schema\n",
"class JuliaSpec(dj.Lookup):\n",
" definition = \"\"\"\n",
" julia_spec : smallint \n",
@@ -272,17 +275,16 @@
" center_imag=0.0 : float\n",
" zoom=1.0 : float\n",
" noise_level=50 : float\n",
- " \"\"\" \n",
+ " \"\"\"\n",
"\n",
- " contents = (\n",
+ " contents = (\n",
" dict(julia_spec=0, creal=-0.4, cimag=0.6, noise_level=50),\n",
" dict(julia_spec=1, creal=-0.7453, cimag=0.11301, noise_level=50),\n",
" dict(julia_spec=2, creal=-0.75, cimag=0.11, noise_level=50),\n",
" dict(julia_spec=3, creal=-0.1, cimag=0.651, noise_level=50),\n",
" dict(julia_spec=4, creal=-0.835, cimag=-0.2321, noise_level=50),\n",
" dict(julia_spec=5, creal=-0.70176, cimag=-0.3842, noise_level=50),\n",
- " )\n",
- "\n"
+ " )"
]
},
{
@@ -293,12 +295,16 @@
"outputs": [],
"source": [
"JuliaSpec.insert1(\n",
- " dict(julia_spec=10, \n",
- " creal=-0.4, cimag=0.6, \n",
- " center_real=0.34, center_imag=-0.30, \n",
- " zoom=10000.0, \n",
- " noise_level=50.0)\n",
- ") "
+ " dict(\n",
+ " julia_spec=10,\n",
+ " creal=-0.4,\n",
+ " cimag=0.6,\n",
+ " center_real=0.34,\n",
+ " center_imag=-0.30,\n",
+ " zoom=10000.0,\n",
+ " noise_level=50.0,\n",
+ " )\n",
+ ")"
]
},
{
@@ -318,12 +324,13 @@
"\n",
" def make(self, key):\n",
" spec = (JuliaSpec & key).fetch1()\n",
- " img = julia(spec['creal'] + 1j*spec['cimag'], \n",
- " size=spec['size'],\n",
- " center=(spec['center_real'], spec['center_imag']),\n",
- " zoom=spec['zoom'],\n",
- " )\n",
- " img += np.random.randn(*img.shape) * spec['noise_level']\n",
+ " img = julia(\n",
+ " spec[\"creal\"] + 1j * spec[\"cimag\"],\n",
+ " size=spec[\"size\"],\n",
+ " center=(spec[\"center_real\"], spec[\"center_imag\"]),\n",
+ " zoom=spec[\"zoom\"],\n",
+ " )\n",
+ " img += np.random.randn(*img.shape) * spec[\"noise_level\"]\n",
" self.insert1(dict(key, image=img.astype(np.float32)))"
]
},
@@ -467,7 +474,7 @@
}
],
"source": [
- "plt.imshow((JuliaImage & 'julia_spec=2').fetch1('image'))\n",
+ "plt.imshow((JuliaImage & \"julia_spec=2\").fetch1(\"image\"))\n",
"plt.axis(False);"
]
},
@@ -539,12 +546,11 @@
" params=null : blob\n",
" \"\"\"\n",
" contents = (\n",
- " (0, 'gaussian', dict(sigma=1.8)),\n",
- " (1, 'median', dict(radius=3)),\n",
- " (2, 'wavelet', {}),\n",
- " (3, 'tv', dict(weight=20.0))\n",
- " )\n",
- "\n"
+ " (0, \"gaussian\", dict(sigma=1.8)),\n",
+ " (1, \"median\", dict(radius=3)),\n",
+ " (2, \"wavelet\", {}),\n",
+ " (3, \"tv\", dict(weight=20.0)),\n",
+ " )"
]
},
{
@@ -554,7 +560,7 @@
"metadata": {},
"outputs": [],
"source": [
- "@schema \n",
+ "@schema\n",
"class JuliaDenoised(dj.Computed):\n",
" definition = \"\"\"\n",
" -> JuliaImage\n",
@@ -564,14 +570,14 @@
" \"\"\"\n",
"\n",
" def make(self, key):\n",
- " img = (JuliaImage & key).fetch1('image')\n",
- " method, params = (DenoiseMethod & key).fetch1('method', 'params')\n",
+ " img = (JuliaImage & key).fetch1(\"image\")\n",
+ " method, params = (DenoiseMethod & key).fetch1(\"method\", \"params\")\n",
"\n",
" if method == \"gaussian\":\n",
" result = filters.gaussian(img, **params)\n",
" elif method == \"median\":\n",
- " result = filters.median(img, disk(params['radius']))\n",
- " elif method == 'tv':\n",
+ " result = filters.median(img, disk(params[\"radius\"]))\n",
+ " elif method == \"tv\":\n",
" result = restoration.denoise_tv_chambolle(img, **params)\n",
" elif method == \"wavelet\":\n",
" result = restoration.denoise_wavelet(noise_img, **params)\n",
@@ -740,8 +746,8 @@
}
],
"source": [
- "keys = JuliaDenoised.fetch('KEY')\n",
- "img = ((JuliaDenoised & keys[20])).fetch1('denoised_image')\n",
+ "keys = JuliaDenoised.fetch(\"KEY\")\n",
+ "img = ((JuliaDenoised & keys[20])).fetch1(\"denoised_image\")\n",
"plt.imshow(img)\n",
"plt.axis(False);"
]
@@ -769,7 +775,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Blob.insert1(dict(id=1, blob=[1, 2, 3, 'Four']))"
+ "Blob.insert1(dict(id=1, blob=[1, 2, 3, \"Four\"]))"
]
},
{
diff --git a/short_tutorials/University.ipynb b/short_tutorials/University.ipynb
index 5d2fcf2..89e6406 100644
--- a/short_tutorials/University.ipynb
+++ b/short_tutorials/University.ipynb
@@ -30,7 +30,7 @@
},
"outputs": [],
"source": [
- "schema = dj.schema('university')\n"
+ "schema = dj.schema(\"university\")"
]
},
{
@@ -117,7 +117,8 @@
" course_name : varchar(200) # e.g. \"Neurobiology of Sensation and Movement.\"\n",
" credits : decimal(3,1) # number of credits earned by completing the course\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class Term(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -125,6 +126,7 @@
" term : enum('Spring', 'Summer', 'Fall')\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Section(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -134,13 +136,15 @@
" ---\n",
" auditorium : varchar(12)\n",
" \"\"\"\n",
- " \n",
+ "\n",
+ "\n",
"@schema\n",
"class CurrentTerm(dj.Manual):\n",
" definition = \"\"\"\n",
" -> Term\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Enroll(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -148,6 +152,7 @@
" -> Section\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class LetterGrade(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -156,6 +161,7 @@
" points : decimal(3,2)\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Grade(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -185,6 +191,7 @@
"import random\n",
"import itertools\n",
"import datetime\n",
+ "\n",
"fake = faker.Faker()"
]
},
@@ -195,14 +202,14 @@
"outputs": [],
"source": [
"def yield_students():\n",
- " fake_name = {'F': fake.name_female, 'M': fake.name_male}\n",
+ " fake_name = {\"F\": fake.name_female, \"M\": fake.name_male}\n",
" while True: # ignore invalid values\n",
" try:\n",
- " sex = random.choice(('F', 'M'))\n",
- " first_name, last_name = fake_name[sex]().split(' ')[:2]\n",
- " street_address, city = fake.address().split('\\n')\n",
- " city, state = city.split(', ')\n",
- " state, zipcode = state.split(' ') \n",
+ " sex = random.choice((\"F\", \"M\"))\n",
+ " first_name, last_name = fake_name[sex]().split(\" \")[:2]\n",
+ " street_address, city = fake.address().split(\"\\n\")\n",
+ " city, state = city.split(\", \")\n",
+ " state, zipcode = state.split(\" \")\n",
" except ValueError:\n",
" continue\n",
" else:\n",
@@ -215,8 +222,10 @@
" home_state=state,\n",
" home_zip=zipcode,\n",
" date_of_birth=str(\n",
- " fake.date_time_between(start_date=\"-35y\", end_date=\"-15y\").date()),\n",
- " home_phone = fake.phone_number()[:20])"
+ " fake.date_time_between(start_date=\"-35y\", end_date=\"-15y\").date()\n",
+ " ),\n",
+ " home_phone=fake.phone_number()[:20],\n",
+ " )"
]
},
{
@@ -225,8 +234,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Student.insert(\n",
- " dict(k, student_id=i) for i, k in zip(range(100,300), yield_students()))"
+ "Student.insert(dict(k, student_id=i) for i, k in zip(range(100, 300), yield_students()))"
]
},
{
@@ -245,15 +253,19 @@
"outputs": [],
"source": [
"Department.insert(\n",
- " dict(dept=dept, \n",
- " dept_name=name, \n",
- " dept_address=fake.address(), \n",
- " dept_phone=fake.phone_number()[:20])\n",
+ " dict(\n",
+ " dept=dept,\n",
+ " dept_name=name,\n",
+ " dept_address=fake.address(),\n",
+ " dept_phone=fake.phone_number()[:20],\n",
+ " )\n",
" for dept, name in [\n",
" [\"CS\", \"Computer Science\"],\n",
" [\"BIOL\", \"Life Sciences\"],\n",
" [\"PHYS\", \"Physics\"],\n",
- " [\"MATH\", \"Mathematics\"]])"
+ " [\"MATH\", \"Mathematics\"],\n",
+ " ]\n",
+ ")"
]
},
{
@@ -262,10 +274,13 @@
"metadata": {},
"outputs": [],
"source": [
- "StudentMajor.insert({**s, **d, \n",
- " 'declare_date':fake.date_between(start_date=datetime.date(1999,1,1))}\n",
- " for s, d in zip(Student.fetch('KEY'), random.choices(Department.fetch('KEY'), k=len(Student())))\n",
- " if random.random() < 0.75)"
+ "StudentMajor.insert(\n",
+ " {**s, **d, \"declare_date\": fake.date_between(start_date=datetime.date(1999, 1, 1))}\n",
+ " for s, d in zip(\n",
+ " Student.fetch(\"KEY\"), random.choices(Department.fetch(\"KEY\"), k=len(Student()))\n",
+ " )\n",
+ " if random.random() < 0.75\n",
+ ")"
]
},
{
@@ -284,82 +299,87 @@
"outputs": [],
"source": [
"# from https://www.utah.edu/\n",
- "Course.insert([\n",
- " ['BIOL', 1006, 'World of Dinosaurs', 3],\n",
- " ['BIOL', 1010, 'Biology in the 21st Century', 3],\n",
- " ['BIOL', 1030, 'Human Biology', 3],\n",
- " ['BIOL', 1210, 'Principles of Biology', 4],\n",
- " ['BIOL', 2010, 'Evolution & Diversity of Life', 3],\n",
- " ['BIOL', 2020, 'Principles of Cell Biology', 3],\n",
- " ['BIOL', 2021, 'Principles of Cell Science', 4],\n",
- " ['BIOL', 2030, 'Principles of Genetics', 3],\n",
- " ['BIOL', 2210, 'Human Genetics',3],\n",
- " ['BIOL', 2325, 'Human Anatomy', 4],\n",
- " ['BIOL', 2330, 'Plants & Society', 3],\n",
- " ['BIOL', 2355, 'Field Botany', 2],\n",
- " ['BIOL', 2420, 'Human Physiology', 4],\n",
+ "Course.insert(\n",
+ " [\n",
+ " [\"BIOL\", 1006, \"World of Dinosaurs\", 3],\n",
+ " [\"BIOL\", 1010, \"Biology in the 21st Century\", 3],\n",
+ " [\"BIOL\", 1030, \"Human Biology\", 3],\n",
+ " [\"BIOL\", 1210, \"Principles of Biology\", 4],\n",
+ " [\"BIOL\", 2010, \"Evolution & Diversity of Life\", 3],\n",
+ " [\"BIOL\", 2020, \"Principles of Cell Biology\", 3],\n",
+ " [\"BIOL\", 2021, \"Principles of Cell Science\", 4],\n",
+ " [\"BIOL\", 2030, \"Principles of Genetics\", 3],\n",
+ " [\"BIOL\", 2210, \"Human Genetics\", 3],\n",
+ " [\"BIOL\", 2325, \"Human Anatomy\", 4],\n",
+ " [\"BIOL\", 2330, \"Plants & Society\", 3],\n",
+ " [\"BIOL\", 2355, \"Field Botany\", 2],\n",
+ " [\"BIOL\", 2420, \"Human Physiology\", 4],\n",
+ " [\"PHYS\", 2040, \"Classcal Theoretical Physics II\", 4],\n",
+ " [\"PHYS\", 2060, \"Quantum Mechanics\", 3],\n",
+ " [\"PHYS\", 2100, \"General Relativity and Cosmology\", 3],\n",
+ " [\"PHYS\", 2140, \"Statistical Mechanics\", 4],\n",
+ " [\"PHYS\", 2210, \"Physics for Scientists and Engineers I\", 4],\n",
+ " [\"PHYS\", 2220, \"Physics for Scientists and Engineers II\", 4],\n",
+ " [\"PHYS\", 3210, \"Physics for Scientists I (Honors)\", 4],\n",
+ " [\"PHYS\", 3220, \"Physics for Scientists II (Honors)\", 4],\n",
+ " [\"MATH\", 1250, \"Calculus for AP Students I\", 4],\n",
+ " [\"MATH\", 1260, \"Calculus for AP Students II\", 4],\n",
+ " [\"MATH\", 1210, \"Calculus I\", 4],\n",
+ " [\"MATH\", 1220, \"Calculus II\", 4],\n",
+ " [\"MATH\", 2210, \"Calculus III\", 3],\n",
+ " [\"MATH\", 2270, \"Linear Algebra\", 4],\n",
+ " [\"MATH\", 2280, \"Introduction to Differential Equations\", 4],\n",
+ " [\"MATH\", 3210, \"Foundations of Analysis I\", 4],\n",
+ " [\"MATH\", 3220, \"Foundations of Analysis II\", 4],\n",
+ " [\"CS\", 1030, \"Foundations of Computer Science\", 3],\n",
+ " [\"CS\", 1410, \"Introduction to Object-Oriented Programming\", 4],\n",
+ " [\"CS\", 2420, \"Introduction to Algorithms & Data Structures\", 4],\n",
+ " [\"CS\", 2100, \"Discrete Structures\", 3],\n",
+ " [\"CS\", 3500, \"Software Practice\", 4],\n",
+ " [\"CS\", 3505, \"Software Practice II\", 3],\n",
+ " [\"CS\", 3810, \"Computer Organization\", 4],\n",
+ " [\"CS\", 4400, \"Computer Systems\", 4],\n",
+ " [\"CS\", 4150, \"Algorithms\", 3],\n",
+ " [\"CS\", 3100, \"Models of Computation\", 3],\n",
+ " [\"CS\", 3200, \"Introduction to Scientific Computing\", 3],\n",
+ " [\"CS\", 4000, \"Senior Capstone Project - Design Phase\", 3],\n",
+ " [\"CS\", 4500, \"Senior Capstone Project\", 3],\n",
+ " [\"CS\", 4940, \"Undergraduate Research\", 3],\n",
+ " [\"CS\", 4970, \"Computer Science Bachelor\" \"s Thesis\", 3],\n",
+ " ]\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Term.insert(\n",
+ " dict(term_year=year, term=term)\n",
+ " for year in range(1999, 2019)\n",
+ " for term in [\"Spring\", \"Summer\", \"Fall\"]\n",
+ ")\n",
"\n",
- " ['PHYS', 2040, 'Classcal Theoretical Physics II', 4],\n",
- " ['PHYS', 2060, 'Quantum Mechanics', 3],\n",
- " ['PHYS', 2100, 'General Relativity and Cosmology', 3],\n",
- " ['PHYS', 2140, 'Statistical Mechanics', 4],\n",
- " \n",
- " ['PHYS', 2210, 'Physics for Scientists and Engineers I', 4], \n",
- " ['PHYS', 2220, 'Physics for Scientists and Engineers II', 4],\n",
- " ['PHYS', 3210, 'Physics for Scientists I (Honors)', 4],\n",
- " ['PHYS', 3220, 'Physics for Scientists II (Honors)', 4],\n",
- " \n",
- " ['MATH', 1250, 'Calculus for AP Students I', 4],\n",
- " ['MATH', 1260, 'Calculus for AP Students II', 4],\n",
- " ['MATH', 1210, 'Calculus I', 4],\n",
- " ['MATH', 1220, 'Calculus II', 4],\n",
- " ['MATH', 2210, 'Calculus III', 3],\n",
- " \n",
- " ['MATH', 2270, 'Linear Algebra', 4],\n",
- " ['MATH', 2280, 'Introduction to Differential Equations', 4],\n",
- " ['MATH', 3210, 'Foundations of Analysis I', 4],\n",
- " ['MATH', 3220, 'Foundations of Analysis II', 4],\n",
- " \n",
- " ['CS', 1030, 'Foundations of Computer Science', 3],\n",
- " ['CS', 1410, 'Introduction to Object-Oriented Programming', 4],\n",
- " ['CS', 2420, 'Introduction to Algorithms & Data Structures', 4],\n",
- " ['CS', 2100, 'Discrete Structures', 3],\n",
- " ['CS', 3500, 'Software Practice', 4],\n",
- " ['CS', 3505, 'Software Practice II', 3],\n",
- " ['CS', 3810, 'Computer Organization', 4],\n",
- " ['CS', 4400, 'Computer Systems', 4],\n",
- " ['CS', 4150, 'Algorithms', 3],\n",
- " ['CS', 3100, 'Models of Computation', 3],\n",
- " ['CS', 3200, 'Introduction to Scientific Computing', 3],\n",
- " ['CS', 4000, 'Senior Capstone Project - Design Phase', 3],\n",
- " ['CS', 4500, 'Senior Capstone Project', 3],\n",
- " ['CS', 4940, 'Undergraduate Research', 3],\n",
- " ['CS', 4970, 'Computer Science Bachelor''s Thesis', 3]])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "Term.insert(dict(term_year=year, term=term) \n",
- " for year in range(1999, 2019) \n",
- " for term in ['Spring', 'Summer', 'Fall'])\n",
+ "Term().fetch(order_by=(\"term_year DESC\", \"term DESC\"), as_dict=True, limit=1)[0]\n",
"\n",
- "Term().fetch(order_by=('term_year DESC', 'term DESC'), as_dict=True, limit=1)[0]\n",
+ "CurrentTerm().insert1(\n",
+ " {**Term().fetch(order_by=(\"term_year DESC\", \"term DESC\"), as_dict=True, limit=1)[0]}\n",
+ ")\n",
"\n",
- "CurrentTerm().insert1({\n",
- " **Term().fetch(order_by=('term_year DESC', 'term DESC'), as_dict=True, limit=1)[0]})\n",
"\n",
"def make_section(prob):\n",
" for c in (Course * Term).proj():\n",
- " for sec in 'abcd':\n",
+ " for sec in \"abcd\":\n",
" if random.random() < prob:\n",
" break\n",
" yield {\n",
- " **c, 'section': sec, \n",
- " 'auditorium': random.choice('ABCDEF') + str(random.randint(1,100))} \n",
+ " **c,\n",
+ " \"section\": sec,\n",
+ " \"auditorium\": random.choice(\"ABCDEF\") + str(random.randint(1, 100)),\n",
+ " }\n",
+ "\n",
"\n",
"Section.insert(make_section(0.5))"
]
@@ -370,18 +390,21 @@
"metadata": {},
"outputs": [],
"source": [
- "LetterGrade.insert([\n",
- " ['A', 4.00],\n",
- " ['A-', 3.67],\n",
- " ['B+', 3.33],\n",
- " ['B', 3.00],\n",
- " ['B-', 2.67],\n",
- " ['C+', 2.33],\n",
- " ['C', 2.00],\n",
- " ['C-', 1.67],\n",
- " ['D+', 1.33],\n",
- " ['D', 1.00],\n",
- " ['F', 0.00]])"
+ "LetterGrade.insert(\n",
+ " [\n",
+ " [\"A\", 4.00],\n",
+ " [\"A-\", 3.67],\n",
+ " [\"B+\", 3.33],\n",
+ " [\"B\", 3.00],\n",
+ " [\"B-\", 2.67],\n",
+ " [\"C+\", 2.33],\n",
+ " [\"C\", 2.00],\n",
+ " [\"C-\", 1.67],\n",
+ " [\"D+\", 1.33],\n",
+ " [\"D\", 1.00],\n",
+ " [\"F\", 0.00],\n",
+ " ]\n",
+ ")"
]
},
{
@@ -390,19 +413,23 @@
"metadata": {},
"outputs": [],
"source": [
- "# Enrollment \n",
- "terms = Term().fetch('KEY')\n",
+ "# Enrollment\n",
+ "terms = Term().fetch(\"KEY\")\n",
"quit_prob = 0.1\n",
- "for student in tqdm(Student.fetch('KEY')):\n",
+ "for student in tqdm(Student.fetch(\"KEY\")):\n",
" start_term = random.randrange(len(terms))\n",
" for term in terms[start_term:]:\n",
" if random.random() < quit_prob:\n",
" break\n",
" else:\n",
- " sections = ((Section & term) - (Course & (Enroll & student))).fetch('KEY')\n",
+ " sections = ((Section & term) - (Course & (Enroll & student))).fetch(\"KEY\")\n",
" if sections:\n",
- " Enroll.insert({**student, **section} for section in \n",
- " random.sample(sections, random.randrange(min(5, len(sections)))))"
+ " Enroll.insert(\n",
+ " {**student, **section}\n",
+ " for section in random.sample(\n",
+ " sections, random.randrange(min(5, len(sections)))\n",
+ " )\n",
+ " )"
]
},
{
@@ -412,14 +439,16 @@
"outputs": [],
"source": [
"# assign random grades\n",
- "grades = LetterGrade.fetch('grade')\n",
+ "grades = LetterGrade.fetch(\"grade\")\n",
"\n",
- "grade_keys = Enroll.fetch('KEY')\n",
+ "grade_keys = Enroll.fetch(\"KEY\")\n",
"random.shuffle(grade_keys)\n",
- "grade_keys = grade_keys[:len(grade_keys)*9//10]\n",
+ "grade_keys = grade_keys[: len(grade_keys) * 9 // 10]\n",
"\n",
- "Grade.insert({**key, 'grade':grade} \n",
- " for key, grade in zip(grade_keys, random.choices(grades, k=len(grade_keys))))"
+ "Grade.insert(\n",
+ " {**key, \"grade\": grade}\n",
+ " for key, grade in zip(grade_keys, random.choices(grades, k=len(grade_keys)))\n",
+ ")"
]
},
{
@@ -452,7 +481,7 @@
"outputs": [],
"source": [
"# Students from Texas\n",
- "Student & {'home_state': 'TX'}"
+ "Student & {\"home_state\": \"TX\"}"
]
},
{
@@ -471,7 +500,7 @@
"outputs": [],
"source": [
"# Male students from outside Texas\n",
- "(Student & 'sex=\"M\"') - {'home_state': 'TX'}"
+ "(Student & 'sex=\"M\"') - {\"home_state\": \"TX\"}"
]
},
{
@@ -481,7 +510,7 @@
"outputs": [],
"source": [
"# Students from TX, OK, or NM\n",
- "Student & [{'home_state':'OK'}, {'home_state':'NM'}, {'home_state':'TX'}] "
+ "Student & [{\"home_state\": \"OK\"}, {\"home_state\": \"NM\"}, {\"home_state\": \"TX\"}]"
]
},
{
@@ -599,7 +628,7 @@
"metadata": {},
"outputs": [],
"source": [
- "#Students who have taken classes or have chosen a major\n",
+ "# Students who have taken classes or have chosen a major\n",
"Student & [Enroll, StudentMajor]"
]
},
@@ -647,7 +676,7 @@
"outputs": [],
"source": [
"# Enrollment with major information\n",
- "Enroll * StudentMajor.proj(major='dept')"
+ "Enroll * StudentMajor.proj(major=\"dept\")"
]
},
{
@@ -657,7 +686,7 @@
"outputs": [],
"source": [
"# Enrollment outside chosen major\n",
- "Enroll * StudentMajor.proj(major='dept') & 'major<>dept'"
+ "Enroll * StudentMajor.proj(major=\"dept\") & \"major<>dept\""
]
},
{
@@ -666,8 +695,8 @@
"metadata": {},
"outputs": [],
"source": [
- "# Enrollment not matching major \n",
- "Enroll - StudentMajor "
+ "# Enrollment not matching major\n",
+ "Enroll - StudentMajor"
]
},
{
@@ -677,7 +706,7 @@
"outputs": [],
"source": [
"# Total grade points\n",
- "(Course * Grade * LetterGrade).proj(total='points*credits')"
+ "(Course * Grade * LetterGrade).proj(total=\"points*credits\")"
]
},
{
@@ -694,7 +723,7 @@
"outputs": [],
"source": [
"# Students in each section\n",
- "Section.aggr(Enroll, n='count(*)')"
+ "Section.aggr(Enroll, n=\"count(*)\")"
]
},
{
@@ -704,7 +733,7 @@
"outputs": [],
"source": [
"# Average grade in each course\n",
- "Course.aggr(Grade*LetterGrade, avg_grade='avg(points)')"
+ "Course.aggr(Grade * LetterGrade, avg_grade=\"avg(points)\")"
]
},
{
@@ -714,8 +743,9 @@
"outputs": [],
"source": [
"# Fraction graded in each section\n",
- "(Section.aggr(Enroll,n='count(*)') * Section.aggr(Grade, m='count(*)')).proj(\n",
- " 'm','n',frac='m/n')"
+ "(Section.aggr(Enroll, n=\"count(*)\") * Section.aggr(Grade, m=\"count(*)\")).proj(\n",
+ " \"m\", \"n\", frac=\"m/n\"\n",
+ ")"
]
},
{
@@ -725,8 +755,7 @@
"outputs": [],
"source": [
"# Student GPA\n",
- "Student.aggr(Course * Grade * LetterGrade, \n",
- " gpa='sum(points*credits)/sum(credits)')"
+ "Student.aggr(Course * Grade * LetterGrade, gpa=\"sum(points*credits)/sum(credits)\")"
]
},
{
@@ -736,9 +765,8 @@
"outputs": [],
"source": [
"# Average GPA for each major\n",
- "gpa = Student.aggr(Course * Grade * LetterGrade,\n",
- " gpa='sum(points*credits)/sum(credits)')\n",
- "Department.aggr(StudentMajor*gpa, avg_gpa='avg(gpa)')"
+ "gpa = Student.aggr(Course * Grade * LetterGrade, gpa=\"sum(points*credits)/sum(credits)\")\n",
+ "Department.aggr(StudentMajor * gpa, avg_gpa=\"avg(gpa)\")"
]
},
{
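The `gpa` expression is a credit-weighted average, GPA = sum(points × credits) / sum(credits), so a heavier course moves the GPA more than a light one. A quick check with made-up numbers:

```python
# Hypothetical transcript: an A (4.00) in a 4-credit course, a C (2.00) in a 2-credit one.
points, credits = [4.00, 2.00], [4, 2]
gpa = sum(p * c for p, c in zip(points, credits)) / sum(credits)
assert round(gpa, 2) == 3.33  # weighted toward the 4-credit course; the plain mean is 3.00
```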
diff --git a/tutorial_pipeline/__init__.py b/tutorial_pipeline/__init__.py
index 485bff0..4c4b6ef 100644
--- a/tutorial_pipeline/__init__.py
+++ b/tutorial_pipeline/__init__.py
@@ -1,3 +1,3 @@
from pathlib import Path
-data_dir = Path(__file__).absolute().parent.parent / 'data'
\ No newline at end of file
+data_dir = Path(__file__).absolute().parent.parent / "data"
diff --git a/tutorial_pipeline/ephys_cell_activity.py b/tutorial_pipeline/ephys_cell_activity.py
index 0439037..f13af30 100644
--- a/tutorial_pipeline/ephys_cell_activity.py
+++ b/tutorial_pipeline/ephys_cell_activity.py
@@ -9,6 +9,7 @@
# Table definitions
+
@schema
class Neuron(dj.Imported):
definition = """
@@ -25,23 +26,32 @@ def make(self, key):
# load the data
data = np.load(data_file)
- print('Populating neuron(s) for mouse_id={mouse_id} on session_date={session_date}'.format(**key))
+ print(
+ "Populating neuron(s) for mouse_id={mouse_id} on session_date={session_date}".format(
+ **key
+ )
+ )
for idx, d in enumerate(data):
# add the index of the 1st dimension as neuron_id
- key['neuron_id'] = idx
+ key["neuron_id"] = idx
# add the loaded data as the "activity" column
- key['activity'] = d
+ key["activity"] = d
# insert the key into self
self.insert1(key)
- print('\tPopulated neuron {neuron_id}'.format(**key))
+ print("\tPopulated neuron {neuron_id}".format(**key))
-Session.insert1({"mouse_id": 100,
- "session_date": "2017-06-01",
- "experiment_setup": 1,
- "experimenter": "Jacob Reimer"}, skip_duplicates=True)
+Session.insert1(
+ {
+ "mouse_id": 100,
+ "session_date": "2017-06-01",
+ "experiment_setup": 1,
+ "experimenter": "Jacob Reimer",
+ },
+ skip_duplicates=True,
+)
Neuron.populate()
diff --git a/tutorial_pipeline/imaging.py b/tutorial_pipeline/imaging.py
index 8757524..66bbc9b 100644
--- a/tutorial_pipeline/imaging.py
+++ b/tutorial_pipeline/imaging.py
@@ -9,6 +9,7 @@
# Table definitions
+
@schema
class Scan(dj.Manual):
definition = """
@@ -30,33 +31,64 @@ class AverageFrame(dj.Imported):
---
average_frame : longblob # average fluorescence across frames
"""
- def make(self, key): # key is the primary key of one of the entries in the table `Scan`
+
+ def make(
+ self, key
+ ): # key is the primary key of one of the entries in the table `Scan`
# fetch data directory from table Session
- data_path = (Session & key).fetch1('data_path')
-
+ data_path = (Session & key).fetch1("data_path")
+
# fetch data file name from table Scan
- file_name = (Scan & key).fetch1('file_name')
-
+ file_name = (Scan & key).fetch1("file_name")
+
# load the file
im = io.imread(os.path.join(data_path, file_name))
# compute the average image across the frames
avg_image = np.mean(im, axis=0)
-
+
# Now prepare the entry as a dictionary with all fields defined in the table.
- key['average_frame'] = avg_image # inherit the primary key from the table Scan
-
+ key["average_frame"] = avg_image # inherit the primary key from the table Scan
+
# insert entry with the method `insert1()`
self.insert1(key)
-
- print('\tPopulated Scan {mouse_id} - {session_date} - {scan_idx}'.format(**key))
-
-Scan.insert([
- {'mouse_id': 0, 'session_date': '2017-05-15', 'scan_idx': 1,
- 'depth': 150, 'wavelength': 920, 'laser_power': 26, 'fps': 15, 'file_name': 'example_scan_01.tif'},
- {'mouse_id': 0, 'session_date': '2017-05-15', 'scan_idx': 2,
- 'depth': 200, 'wavelength': 920, 'laser_power': 24, 'fps': 15, 'file_name': 'example_scan_02.tif'},
- {'mouse_id': 100, 'session_date': '2017-05-25', 'scan_idx': 1,
- 'depth': 150, 'wavelength': 920, 'laser_power': 25, 'fps': 15, 'file_name': 'example_scan_03.tif'}],
- skip_duplicates=True)
+
+ print("\tPopulated Scan {mouse_id} - {session_date} - {scan_idx}".format(**key))
+
+
+Scan.insert(
+ [
+ {
+ "mouse_id": 0,
+ "session_date": "2017-05-15",
+ "scan_idx": 1,
+ "depth": 150,
+ "wavelength": 920,
+ "laser_power": 26,
+ "fps": 15,
+ "file_name": "example_scan_01.tif",
+ },
+ {
+ "mouse_id": 0,
+ "session_date": "2017-05-15",
+ "scan_idx": 2,
+ "depth": 200,
+ "wavelength": 920,
+ "laser_power": 24,
+ "fps": 15,
+ "file_name": "example_scan_02.tif",
+ },
+ {
+ "mouse_id": 100,
+ "session_date": "2017-05-25",
+ "scan_idx": 1,
+ "depth": 150,
+ "wavelength": 920,
+ "laser_power": 25,
+ "fps": 15,
+ "file_name": "example_scan_03.tif",
+ },
+ ],
+ skip_duplicates=True,
+)
AverageFrame.populate()
diff --git a/tutorial_pipeline/mouse_session.py b/tutorial_pipeline/mouse_session.py
index 92f251d..608ec86 100644
--- a/tutorial_pipeline/mouse_session.py
+++ b/tutorial_pipeline/mouse_session.py
@@ -3,12 +3,13 @@
# fail-safe user name retrieval
-username = dj.conn().conn_info['user']
-schema = dj.schema('{}_tutorial_pipeline'.format(username))
+username = dj.conn().conn_info["user"]
+schema = dj.schema("{}_tutorial_pipeline".format(username))
# Table definitions
+
@schema
class Mouse(dj.Manual):
definition = """
@@ -31,46 +32,50 @@ class Session(dj.Manual):
experimenter : varchar(100) # experimenter name
data_path='' : varchar(255) #
"""
-
+
# Insert the following data into the table
-
+
mouse_data = [
- {'dob': "2017-03-01", 'mouse_id': 0, 'sex': 'M'},
- {'dob': "2016-11-19", 'mouse_id': 1, 'sex': 'M'},
- {'dob': "2016-11-20", 'mouse_id': 2, 'sex': 'unknown'},
- {'dob': "2016-12-25", 'mouse_id': 5, 'sex': 'F'},
- {'dob': "2017-01-01", 'mouse_id': 10, 'sex': 'F'},
- {'dob': "2017-01-03", 'mouse_id': 11, 'sex': 'F'},
- {'dob': "2017-05-12", 'mouse_id': 100, 'sex': 'F'}
+ {"dob": "2017-03-01", "mouse_id": 0, "sex": "M"},
+ {"dob": "2016-11-19", "mouse_id": 1, "sex": "M"},
+ {"dob": "2016-11-20", "mouse_id": 2, "sex": "unknown"},
+ {"dob": "2016-12-25", "mouse_id": 5, "sex": "F"},
+ {"dob": "2017-01-01", "mouse_id": 10, "sex": "F"},
+ {"dob": "2017-01-03", "mouse_id": 11, "sex": "F"},
+ {"dob": "2017-05-12", "mouse_id": 100, "sex": "F"},
]
session_data = [
- {'experiment_setup': 0,
- 'experimenter': 'Edgar Y. Walker',
- 'mouse_id': 0,
- 'session_date': "2017-05-15",
- 'data_path': data_dir.as_posix()
- },
- {'experiment_setup': 0,
- 'experimenter': 'Edgar Y. Walker',
- 'mouse_id': 0,
- 'session_date': "2017-05-19",
- 'data_path': data_dir.as_posix()
- },
- {'experiment_setup': 1,
- 'experimenter': 'Fabian Sinz',
- 'mouse_id': 5,
- 'session_date': "2017-01-05",
- 'data_path': data_dir.as_posix()
- },
- {'experiment_setup': 100,
- 'experimenter': 'Jacob Reimer',
- 'mouse_id': 100,
- 'session_date': "2017-05-25",
- 'data_path': data_dir.as_posix()
- }
+ {
+ "experiment_setup": 0,
+ "experimenter": "Edgar Y. Walker",
+ "mouse_id": 0,
+ "session_date": "2017-05-15",
+ "data_path": data_dir.as_posix(),
+ },
+ {
+ "experiment_setup": 0,
+ "experimenter": "Edgar Y. Walker",
+ "mouse_id": 0,
+ "session_date": "2017-05-19",
+ "data_path": data_dir.as_posix(),
+ },
+ {
+ "experiment_setup": 1,
+ "experimenter": "Fabian Sinz",
+ "mouse_id": 5,
+ "session_date": "2017-01-05",
+ "data_path": data_dir.as_posix(),
+ },
+ {
+ "experiment_setup": 100,
+ "experimenter": "Jacob Reimer",
+ "mouse_id": 100,
+ "session_date": "2017-05-25",
+ "data_path": data_dir.as_posix(),
+ },
]
Mouse.insert(mouse_data, skip_duplicates=True)
-Session.insert(session_data, skip_duplicates=True)
\ No newline at end of file
+Session.insert(session_data, skip_duplicates=True)
diff --git a/tutorials/01-DataJoint Basics.ipynb b/tutorials/01-DataJoint Basics.ipynb
index 2ea6198..f434487 100644
--- a/tutorials/01-DataJoint Basics.ipynb
+++ b/tutorials/01-DataJoint Basics.ipynb
@@ -311,7 +311,7 @@
"metadata": {},
"outputs": [],
"source": [
- "schema = dj.schema('tutorial')"
+ "schema = dj.schema(\"tutorial\")"
]
},
{
@@ -405,7 +405,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Mouse.insert1((0, '2017-03-01', 'M'))"
+ "Mouse.insert1((0, \"2017-03-01\", \"M\"))"
]
},
{
@@ -430,11 +430,7 @@
"metadata": {},
"outputs": [],
"source": [
- "data = {\n",
- " 'mouse_id': 100,\n",
- " 'dob': '2017-05-12',\n",
- " 'sex': 'F'\n",
- "}"
+ "data = {\"mouse_id\": 100, \"dob\": \"2017-05-12\", \"sex\": \"F\"}"
]
},
{
@@ -468,11 +464,7 @@
"metadata": {},
"outputs": [],
"source": [
- "data = [\n",
- " (1, '2016-11-19', 'M'),\n",
- " (2, '2016-11-20', 'unknown'),\n",
- " (5, '2016-12-25', 'F')\n",
- "]"
+ "data = [(1, \"2016-11-19\", \"M\"), (2, \"2016-11-20\", \"unknown\"), (5, \"2016-12-25\", \"F\")]"
]
},
{
@@ -498,8 +490,8 @@
"outputs": [],
"source": [
"data = [\n",
- " {'mouse_id': 10, 'dob': '2017-01-01', 'sex': 'F'},\n",
- " {'mouse_id': 11, 'dob': '2017-01-03', 'sex': 'F'},\n",
+ " {\"mouse_id\": 10, \"dob\": \"2017-01-01\", \"sex\": \"F\"},\n",
+ " {\"mouse_id\": 11, \"dob\": \"2017-01-03\", \"sex\": \"F\"},\n",
"]\n",
"\n",
"# insert them all\n",
@@ -538,10 +530,12 @@
"outputs": [],
"source": [
"Mouse.insert1(\n",
- "{'mouse_id': 0,\n",
- " 'dob': '2018-01-01',\n",
- " 'sex': 'M',\n",
- "})"
+ " {\n",
+ " \"mouse_id\": 0,\n",
+ " \"dob\": \"2018-01-01\",\n",
+ " \"sex\": \"M\",\n",
+ " }\n",
+ ")"
]
},
{
@@ -558,11 +552,11 @@
"outputs": [],
"source": [
"data = [\n",
- " {'mouse_id': 12, 'dob': '2017-03-21', 'sex': 'F'},\n",
- " {'mouse_id': 18, 'dob': '2017-05-01', 'sex': 'F'},\n",
- " {'mouse_id': 19, 'dob': '2018-07-21', 'sex': 'M'},\n",
- " {'mouse_id': 22, 'dob': '2019-12-15', 'sex': 'F'},\n",
- " {'mouse_id': 34, 'dob': '2018-09-22', 'sex': 'M'}\n",
+ " {\"mouse_id\": 12, \"dob\": \"2017-03-21\", \"sex\": \"F\"},\n",
+ " {\"mouse_id\": 18, \"dob\": \"2017-05-01\", \"sex\": \"F\"},\n",
+ " {\"mouse_id\": 19, \"dob\": \"2018-07-21\", \"sex\": \"M\"},\n",
+ " {\"mouse_id\": 22, \"dob\": \"2019-12-15\", \"sex\": \"F\"},\n",
+ " {\"mouse_id\": 34, \"dob\": \"2018-09-22\", \"sex\": \"M\"},\n",
"]\n",
"\n",
"# insert them all\n",
@@ -584,7 +578,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE - Insert more mice\n"
+ "# ENTER YOUR CODE - Insert more mice"
]
},
{
@@ -690,10 +684,10 @@
"outputs": [],
"source": [
"data = {\n",
- " 'mouse_id': 0,\n",
- " 'session_date': '2017-05-15',\n",
- " 'experiment_setup': 0,\n",
- " 'experimenter': 'Edgar Y. Walker'\n",
+ " \"mouse_id\": 0,\n",
+ " \"session_date\": \"2017-05-15\",\n",
+ " \"experiment_setup\": 0,\n",
+ " \"experimenter\": \"Edgar Y. Walker\",\n",
"}\n",
"\n",
"Session.insert1(data)"
@@ -722,10 +716,10 @@
"outputs": [],
"source": [
"data = {\n",
- " 'mouse_id': 0,\n",
- " 'session_date': '2018-01-15',\n",
- " 'experiment_setup': 100,\n",
- " 'experimenter': 'Jacob Reimer'\n",
+ " \"mouse_id\": 0,\n",
+ " \"session_date\": \"2018-01-15\",\n",
+ " \"experiment_setup\": 100,\n",
+ " \"experimenter\": \"Jacob Reimer\",\n",
"}\n",
"\n",
"Session.insert1(data)\n",
@@ -747,10 +741,10 @@
"outputs": [],
"source": [
"data = {\n",
- " 'mouse_id': 18,\n",
- " 'session_date': '2018-01-15',\n",
- " 'experiment_setup': 101,\n",
- " 'experimenter': 'Jacob Reimer'\n",
+ " \"mouse_id\": 18,\n",
+ " \"session_date\": \"2018-01-15\",\n",
+ " \"experiment_setup\": 101,\n",
+ " \"experimenter\": \"Jacob Reimer\",\n",
"}\n",
"\n",
"# insert them all\n",
@@ -780,10 +774,10 @@
"outputs": [],
"source": [
"bad_data = {\n",
- " 'mouse_id': 9999, # this mouse doesn't exist!\n",
- " 'session_date': '2017-05-15',\n",
- " 'experiment_setup': 0,\n",
- " 'experimenter': 'Edgar Y. Walker'\n",
+ " \"mouse_id\": 9999, # this mouse doesn't exist!\n",
+ " \"session_date\": \"2017-05-15\",\n",
+ " \"experiment_setup\": 0,\n",
+ " \"experimenter\": \"Edgar Y. Walker\",\n",
"}"
]
},
@@ -870,7 +864,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Mouse & 'mouse_id = 0'"
+ "Mouse & \"mouse_id = 0\""
]
},
{
@@ -1045,7 +1039,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE\n"
+ "# ENTER YOUR CODE"
]
},
{
@@ -1082,7 +1076,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Mouse & Session "
+ "Mouse & Session"
]
},
{
@@ -1153,7 +1147,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE\n"
+ "# ENTER YOUR CODE"
]
},
{
@@ -1246,7 +1240,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Mouse.proj('sex')"
+ "Mouse.proj(\"sex\")"
]
},
{
@@ -1271,7 +1265,7 @@
"metadata": {},
"outputs": [],
"source": [
- "Mouse.proj(date_of_birth='dob')"
+ "Mouse.proj(date_of_birth=\"dob\")"
]
},
{
@@ -1288,7 +1282,7 @@
"metadata": {},
"outputs": [],
"source": [
- "(Mouse * Session).proj(age='datediff(session_date, dob)')"
+ "(Mouse * Session).proj(age=\"datediff(session_date, dob)\")"
]
},
{
@@ -1305,7 +1299,7 @@
"metadata": {},
"outputs": [],
"source": [
- "(Mouse * Session).proj(..., age='datediff(session_date, dob)')"
+ "(Mouse * Session).proj(..., age=\"datediff(session_date, dob)\")"
]
},
{
@@ -1407,7 +1401,7 @@
"metadata": {},
"outputs": [],
"source": [
- "(Mouse & 'sex = \"M\"').fetch(format='frame')"
+ "(Mouse & 'sex = \"M\"').fetch(format=\"frame\")"
]
},
{
@@ -1423,7 +1417,7 @@
"metadata": {},
"outputs": [],
"source": [
- "(Mouse & 'sex = \"M\"').fetch('KEY')"
+ "(Mouse & 'sex = \"M\"').fetch(\"KEY\")"
]
},
{
@@ -1439,7 +1433,7 @@
"metadata": {},
"outputs": [],
"source": [
- "sex, dob = Mouse.fetch('sex', 'dob')"
+ "sex, dob = Mouse.fetch(\"sex\", \"dob\")"
]
},
{
@@ -1473,7 +1467,7 @@
"metadata": {},
"outputs": [],
"source": [
- "info = Mouse.fetch('sex', 'dob', as_dict=True)\n",
+ "info = Mouse.fetch(\"sex\", \"dob\", as_dict=True)\n",
"info"
]
},
@@ -1497,7 +1491,9 @@
"metadata": {},
"outputs": [],
"source": [
- "mouse_0 = (Mouse & {'mouse_id': 0}).fetch1() # \"fetch1()\" because we know there's only one\n",
+ "mouse_0 = (\n",
+ " Mouse & {\"mouse_id\": 0}\n",
+ ").fetch1() # \"fetch1()\" because we know there's only one\n",
"mouse_0"
]
},
@@ -1514,7 +1510,7 @@
"metadata": {},
"outputs": [],
"source": [
- "(Mouse & {'mouse_id': 0}).fetch1('KEY')"
+ "(Mouse & {\"mouse_id\": 0}).fetch1(\"KEY\")"
]
},
{
@@ -1530,7 +1526,7 @@
"metadata": {},
"outputs": [],
"source": [
- "sex, dob = (Mouse & {'mouse_id': 0}).fetch1('sex', 'dob')"
+ "sex, dob = (Mouse & {\"mouse_id\": 0}).fetch1(\"sex\", \"dob\")"
]
},
{
@@ -1579,7 +1575,7 @@
"metadata": {},
"outputs": [],
"source": [
- "(Mouse & 'mouse_id = 100').delete()"
+ "(Mouse & \"mouse_id = 100\").delete()"
]
},
{
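The restriction, projection, and fetch calls reformatted above compose into complete queries. A minimal sketch, assuming the Mouse and Session tables from this notebook are declared and populated:

    # chain restriction, join, projection, and fetch (tutorial schema assumed)
    males = Mouse & 'sex = "M"'                        # restrict by a condition string
    with_age = (Mouse * Session).proj(..., age="datediff(session_date, dob)")
    df = (with_age & males).fetch(format="frame")      # male mice with sessions, as a DataFrame
    keys = (Mouse & {"mouse_id": 0}).fetch("KEY")      # or just the primary keys, as dicts
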
diff --git a/tutorials/02-Calcium Imaging Imported Tables.ipynb b/tutorials/02-Calcium Imaging Imported Tables.ipynb
index e7eede2..16a3e58 100644
--- a/tutorials/02-Calcium Imaging Imported Tables.ipynb
+++ b/tutorials/02-Calcium Imaging Imported Tables.ipynb
@@ -60,6 +60,7 @@
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
+ "\n",
"%matplotlib inline"
]
},
@@ -101,7 +102,8 @@
"metadata": {},
"outputs": [],
"source": [
- "schema = dj.schema('tutorial')\n",
+ "schema = dj.schema(\"tutorial\")\n",
+ "\n",
"\n",
"@schema\n",
"class Mouse(dj.Manual):\n",
@@ -113,6 +115,7 @@
" sex=\"unknown\" : enum('M','F','unknown') # sex\n",
" \"\"\"\n",
"\n",
+ "\n",
"@schema\n",
"class Session(dj.Manual):\n",
" definition = \"\"\"\n",
@@ -125,41 +128,46 @@
" data_path='' : varchar(255) # relative path\n",
" \"\"\"\n",
"\n",
+ "\n",
"mouse_data = [\n",
- " {'dob': \"2017-03-01\", 'mouse_id': 0, 'sex': 'M'},\n",
- " {'dob': \"2016-11-19\", 'mouse_id': 1, 'sex': 'M'},\n",
- " {'dob': \"2016-11-20\", 'mouse_id': 2, 'sex': 'unknown'},\n",
- " {'dob': \"2016-12-25\", 'mouse_id': 5, 'sex': 'F'},\n",
- " {'dob': \"2017-01-01\", 'mouse_id': 10, 'sex': 'F'},\n",
- " {'dob': \"2017-01-03\", 'mouse_id': 11, 'sex': 'F'},\n",
- " {'dob': \"2017-05-12\", 'mouse_id': 100, 'sex': 'F'}\n",
+ " {\"dob\": \"2017-03-01\", \"mouse_id\": 0, \"sex\": \"M\"},\n",
+ " {\"dob\": \"2016-11-19\", \"mouse_id\": 1, \"sex\": \"M\"},\n",
+ " {\"dob\": \"2016-11-20\", \"mouse_id\": 2, \"sex\": \"unknown\"},\n",
+ " {\"dob\": \"2016-12-25\", \"mouse_id\": 5, \"sex\": \"F\"},\n",
+ " {\"dob\": \"2017-01-01\", \"mouse_id\": 10, \"sex\": \"F\"},\n",
+ " {\"dob\": \"2017-01-03\", \"mouse_id\": 11, \"sex\": \"F\"},\n",
+ " {\"dob\": \"2017-05-12\", \"mouse_id\": 100, \"sex\": \"F\"},\n",
"]\n",
"\n",
"session_data = [\n",
- " {'experiment_setup': 0,\n",
- " 'experimenter': 'Edgar Y. Walker',\n",
- " 'mouse_id': 0,\n",
- " 'session_date': \"2017-05-15\",\n",
- " 'data_path': '../data/'\n",
- " },\n",
- " {'experiment_setup': 0,\n",
- " 'experimenter': 'Edgar Y. Walker',\n",
- " 'mouse_id': 0,\n",
- " 'session_date': \"2017-05-19\",\n",
- " 'data_path': '../data/'\n",
- " },\n",
- " {'experiment_setup': 1,\n",
- " 'experimenter': 'Fabian Sinz',\n",
- " 'mouse_id': 5,\n",
- " 'session_date': \"2017-01-05\",\n",
- " 'data_path': '../data/'\n",
- " },\n",
- " {'experiment_setup': 100,\n",
- " 'experimenter': 'Jacob Reimer',\n",
- " 'mouse_id': 100,\n",
- " 'session_date': \"2017-05-25\",\n",
- " 'data_path': '../data/'\n",
- " }\n",
+ " {\n",
+ " \"experiment_setup\": 0,\n",
+ " \"experimenter\": \"Edgar Y. Walker\",\n",
+ " \"mouse_id\": 0,\n",
+ " \"session_date\": \"2017-05-15\",\n",
+ " \"data_path\": \"../data/\",\n",
+ " },\n",
+ " {\n",
+ " \"experiment_setup\": 0,\n",
+ " \"experimenter\": \"Edgar Y. Walker\",\n",
+ " \"mouse_id\": 0,\n",
+ " \"session_date\": \"2017-05-19\",\n",
+ " \"data_path\": \"../data/\",\n",
+ " },\n",
+ " {\n",
+ " \"experiment_setup\": 1,\n",
+ " \"experimenter\": \"Fabian Sinz\",\n",
+ " \"mouse_id\": 5,\n",
+ " \"session_date\": \"2017-01-05\",\n",
+ " \"data_path\": \"../data/\",\n",
+ " },\n",
+ " {\n",
+ " \"experiment_setup\": 100,\n",
+ " \"experimenter\": \"Jacob Reimer\",\n",
+ " \"mouse_id\": 100,\n",
+ " \"session_date\": \"2017-05-25\",\n",
+ " \"data_path\": \"../data/\",\n",
+ " },\n",
"]\n",
"\n",
"Mouse.insert(mouse_data, skip_duplicates=True)\n",
@@ -276,14 +284,40 @@
"metadata": {},
"outputs": [],
"source": [
- "Scan.insert([\n",
- " {'mouse_id': 0, 'session_date': '2017-05-15', 'scan_idx': 1, \n",
- " 'depth': 150, 'wavelength': 920, 'laser_power': 26, 'fps': 15, 'file_name': 'example_scan_01.tif'},\n",
- " {'mouse_id': 0, 'session_date': '2017-05-15', 'scan_idx': 2, \n",
- " 'depth': 200, 'wavelength': 920, 'laser_power': 24, 'fps': 15, 'file_name': 'example_scan_02.tif'},\n",
- " {'mouse_id': 0, 'session_date': '2017-05-15', 'scan_idx': 3, \n",
- " 'depth': 200, 'wavelength': 920, 'laser_power': 24, 'fps': 15, 'file_name': 'example_scan_03.tif'} \n",
- "])"
+ "Scan.insert(\n",
+ " [\n",
+ " {\n",
+ " \"mouse_id\": 0,\n",
+ " \"session_date\": \"2017-05-15\",\n",
+ " \"scan_idx\": 1,\n",
+ " \"depth\": 150,\n",
+ " \"wavelength\": 920,\n",
+ " \"laser_power\": 26,\n",
+ " \"fps\": 15,\n",
+ " \"file_name\": \"example_scan_01.tif\",\n",
+ " },\n",
+ " {\n",
+ " \"mouse_id\": 0,\n",
+ " \"session_date\": \"2017-05-15\",\n",
+ " \"scan_idx\": 2,\n",
+ " \"depth\": 200,\n",
+ " \"wavelength\": 920,\n",
+ " \"laser_power\": 24,\n",
+ " \"fps\": 15,\n",
+ " \"file_name\": \"example_scan_02.tif\",\n",
+ " },\n",
+ " {\n",
+ " \"mouse_id\": 0,\n",
+ " \"session_date\": \"2017-05-15\",\n",
+ " \"scan_idx\": 3,\n",
+ " \"depth\": 200,\n",
+ " \"wavelength\": 920,\n",
+ " \"laser_power\": 24,\n",
+ " \"fps\": 15,\n",
+ " \"file_name\": \"example_scan_03.tif\",\n",
+ " },\n",
+ " ]\n",
+ ")"
]
},
{
@@ -312,8 +346,9 @@
"source": [
"import os\n",
"from skimage import io\n",
- "im = io.imread('../data/example_scan_01.tif')\n",
- "print('Number of frames = ',im.shape[0])"
+ "\n",
+ "im = io.imread(\"../data/example_scan_01.tif\")\n",
+ "print(\"Number of frames = \", im.shape[0])"
]
},
{
@@ -331,8 +366,8 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! \n",
- "av_frame = np.mean(im,axis=0)\n",
+ "# ENTER YOUR CODE!\n",
+ "av_frame = np.mean(im, axis=0)\n",
"plt.imshow(av_frame)"
]
},
@@ -497,8 +532,11 @@
" ---\n",
" average_frame : longblob # average fluorescence across frames\n",
" \"\"\"\n",
- " def make(self, key): # key is the primary key of one of the entries in the table `Scan`\n",
- " print('key is', key)"
+ "\n",
+ " def make(\n",
+ " self, key\n",
+ " ): # key is the primary key of one of the entries in the table `Scan`\n",
+ " print(\"key is\", key)"
]
},
{
@@ -571,6 +609,8 @@
"outputs": [],
"source": [
"import os\n",
+ "\n",
+ "\n",
"@schema\n",
"class AverageFrame(dj.Imported):\n",
" definition = \"\"\"\n",
@@ -578,25 +618,28 @@
" ---\n",
" average_frame : longblob # average fluorescence across frames\n",
" \"\"\"\n",
- " def make(self, key): # key is the primary key of one of the entries in the table `Scan`\n",
+ "\n",
+ " def make(\n",
+ " self, key\n",
+ " ): # key is the primary key of one of the entries in the table `Scan`\n",
" # fetch data directory from table Session\n",
- " data_path = (Session & key).fetch1('data_path')\n",
- " \n",
+ " data_path = (Session & key).fetch1(\"data_path\")\n",
+ "\n",
" # fetch data file name from table Scan\n",
- " file_name = (Scan & key).fetch1('file_name')\n",
- " \n",
+ " file_name = (Scan & key).fetch1(\"file_name\")\n",
+ "\n",
" # load the file\n",
" im = io.imread(os.path.join(data_path, file_name))\n",
" # compute the average image across the frames\n",
" avg_image = np.mean(im, axis=0)\n",
- " \n",
+ "\n",
" # Now prepare the entry as a dictionary with all fields defined in the table.\n",
- " key['average_frame'] = avg_image # inherit the primary key from the table Scan\n",
- " \n",
+ " key[\"average_frame\"] = avg_image # inherit the primary key from the table Scan\n",
+ "\n",
" # insert entry with the method `insert1()`\n",
" self.insert1(key)\n",
- " \n",
- " print('\\tPopulated Scan {mouse_id} - {session_date} - {scan_idx}'.format(**key))"
+ "\n",
+ " print(\"\\tPopulated Scan {mouse_id} - {session_date} - {scan_idx}\".format(**key))"
]
},
{
@@ -674,16 +717,18 @@
"metadata": {},
"outputs": [],
"source": [
- "Scan.insert1({\n",
- " 'mouse_id': 100,\n",
- " 'session_date': '2017-05-25',\n",
- " 'scan_idx': 1,\n",
- " 'depth': 150,\n",
- " 'wavelength': 920,\n",
- " 'laser_power': 25,\n",
- " 'fps': 15,\n",
- " 'file_name': 'example_scan_03.tif'\n",
- "})"
+ "Scan.insert1(\n",
+ " {\n",
+ " \"mouse_id\": 100,\n",
+ " \"session_date\": \"2017-05-25\",\n",
+ " \"scan_idx\": 1,\n",
+ " \"depth\": 150,\n",
+ " \"wavelength\": 920,\n",
+ " \"laser_power\": 25,\n",
+ " \"fps\": 15,\n",
+ " \"file_name\": \"example_scan_03.tif\",\n",
+ " }\n",
+ ")"
]
},
{
@@ -741,8 +786,8 @@
"metadata": {},
"outputs": [],
"source": [
- "key = dict(mouse_id=0, session_date='2017-05-15', scan_idx=1)\n",
- "avg_image = (AverageFrame & key).fetch1('average_frame')\n",
+ "key = dict(mouse_id=0, session_date=\"2017-05-15\", scan_idx=1)\n",
+ "avg_image = (AverageFrame & key).fetch1(\"average_frame\")\n",
"plt.imshow(avg_image, cmap=plt.cm.gray)"
]
},
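The make() method above is never called directly: AverageFrame.populate() invokes it once for every Scan entry that has no AverageFrame row yet. A minimal sketch, assuming the tables declared above (display_progress is a standard populate option):

    # fill the Imported table, then pull one result back out
    AverageFrame.populate(display_progress=True)  # runs make(key) for each missing Scan key
    key = dict(mouse_id=0, session_date="2017-05-15", scan_idx=1)
    avg_image = (AverageFrame & key).fetch1("average_frame")
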
diff --git a/tutorials/03-Calcium Imaging Computed Tables.ipynb b/tutorials/03-Calcium Imaging Computed Tables.ipynb
index 5bd952d..b68bdbe 100644
--- a/tutorials/03-Calcium Imaging Computed Tables.ipynb
+++ b/tutorials/03-Calcium Imaging Computed Tables.ipynb
@@ -53,6 +53,7 @@
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
+ "\n",
"%matplotlib inline"
]
},
@@ -156,7 +157,7 @@
"metadata": {},
"outputs": [],
"source": [
- "keys = AverageFrame.fetch('KEY')\n",
+ "keys = AverageFrame.fetch(\"KEY\")\n",
"\n",
"# pick one key\n",
"key = keys[0]"
@@ -168,7 +169,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - preview an AverageFrame for a particular key\n"
+ "# ENTER YOUR CODE! - preview an AverageFrame for a particular key"
]
},
{
@@ -177,7 +178,7 @@
"metadata": {},
"outputs": [],
"source": [
- "avg_image = AverageFrame.fetch('average_frame')"
+ "avg_image = AverageFrame.fetch(\"average_frame\")"
]
},
{
@@ -218,7 +219,7 @@
"metadata": {},
"outputs": [],
"source": [
- "avg_image = (AverageFrame & key).fetch1('average_frame')"
+ "avg_image = (AverageFrame & key).fetch1(\"average_frame\")"
]
},
{
@@ -315,7 +316,7 @@
"source": [
"label_im, nb_labels = ndimage.label(mask)\n",
"print(nb_labels)\n",
- "plt.imshow(label_im) "
+ "plt.imshow(label_im)"
]
},
{
@@ -341,7 +342,7 @@
"outputs": [],
"source": [
"size_cutoff = 50\n",
- "sizes = np.array([np.sum(label_im==i) for i in np.unique(label_im)])\n",
+ "sizes = np.array([np.sum(label_im == i) for i in np.unique(label_im)])\n",
"\n",
"small_size_filter = sizes < size_cutoff\n",
"pixel_to_remove = small_size_filter[label_im]\n",
@@ -371,7 +372,7 @@
"outputs": [],
"source": [
"rois = []\n",
- "for i in np.unique(label_im)[1:]: # 0 is the background\n",
+ "for i in np.unique(label_im)[1:]: # 0 is the background\n",
" rois.append(label_im == i)"
]
},
@@ -477,6 +478,7 @@
" ---\n",
" segmented_masks : longblob # overview of segmented masks\n",
" \"\"\"\n",
+ "\n",
" class Roi(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -523,6 +525,7 @@
" ---\n",
" segmented_masks : longblob # overview of segmented masks\n",
" \"\"\"\n",
+ "\n",
" class Roi(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -530,40 +533,43 @@
" ---\n",
" mask : longblob # mask of this roi\n",
" \"\"\"\n",
- " \n",
- " def make(self, key): # key is one of the primary keys of the join product of AverageFrame and ParameterSet\n",
- " \n",
- " print('Populating for: ', key)\n",
- " \n",
+ "\n",
+ " def make(\n",
+ " self, key\n",
+ " ): # key is one of the primary keys of the join product of AverageFrame and ParameterSet\n",
+ " print(\"Populating for: \", key)\n",
+ "\n",
" # fetch average image from the previous table AverageFrame\n",
- " avg_image = (AverageFrame & key).fetch1('average_frame')\n",
- " \n",
+ " avg_image = (AverageFrame & key).fetch1(\"average_frame\")\n",
+ "\n",
" # fetch the parameters threshold and size_cutoff\n",
" threshold, size_cutoff = (SegmentationParam & key).fetch1(\n",
- " 'threshold', 'size_cutoff')\n",
- " \n",
+ " \"threshold\", \"size_cutoff\"\n",
+ " )\n",
+ "\n",
" # perform the thresholding and blob detection\n",
" mask = avg_image > threshold\n",
" label_im, nb_labels = ndimage.label(mask)\n",
- " sizes = np.array([np.sum(label_im==i) for i in np.unique(label_im)])\n",
+ " sizes = np.array([np.sum(label_im == i) for i in np.unique(label_im)])\n",
"\n",
" small_size_filter = sizes < size_cutoff\n",
" pixel_to_remove = small_size_filter[label_im]\n",
"\n",
" label_im[pixel_to_remove] = 0\n",
- " \n",
+ "\n",
" rois = []\n",
- " for i in np.unique(label_im)[1:]: # 0 is the background\n",
+ " for i in np.unique(label_im)[1:]: # 0 is the background\n",
" rois.append(\n",
- " dict(**key, # inherit primary key from master table\n",
- " roi_idx=i, \n",
- " mask=label_im==i))\n",
- " \n",
+ " dict(\n",
+ " **key, # inherit primary key from master table\n",
+ " roi_idx=i,\n",
+ " mask=label_im == i\n",
+ " )\n",
+ " )\n",
+ "\n",
" # insert into the master table first\n",
- " self.insert1(\n",
- " dict(**key, segmented_masks=label_im)\n",
- " )\n",
- " print('Detected {} ROIs!\\n'.format(len(rois)))\n",
+ " self.insert1(dict(**key, segmented_masks=label_im))\n",
+ " print(\"Detected {} ROIs!\\n\".format(len(rois)))\n",
" # then insert into the part table\n",
" self.Roi.insert(rois)"
]
@@ -629,7 +635,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - populate the Segmentation table\n"
+ "# ENTER YOUR CODE! - populate the Segmentation table"
]
},
{
@@ -693,7 +699,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - populate the Segmentation table for real!\n"
+ "# ENTER YOUR CODE! - populate the Segmentation table for real!"
]
},
{
@@ -750,7 +756,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - populate the \"missing\" entry in Segmentation table\n"
+ "# ENTER YOUR CODE! - populate the \"missing\" entry in Segmentation table"
]
},
{
@@ -797,7 +803,7 @@
"outputs": [],
"source": [
"# Select 'No' when it pops up\n",
- "(Segmentation & 'seg_param_id = 0').delete()"
+ "(Segmentation & \"seg_param_id = 0\").delete()"
]
},
{
@@ -813,7 +819,7 @@
"metadata": {},
"outputs": [],
"source": [
- "SegmentationParam & 'seg_param_id = 0'"
+ "SegmentationParam & \"seg_param_id = 0\""
]
},
{
@@ -822,7 +828,7 @@
"metadata": {},
"outputs": [],
"source": [
- "(SegmentationParam() & 'seg_param_id = 0').delete()"
+ "(SegmentationParam() & \"seg_param_id = 0\").delete()"
]
},
{
@@ -855,7 +861,7 @@
"outputs": [],
"source": [
"# show one example ROI\n",
- "masks = (Segmentation.Roi).fetch('mask')\n",
+ "masks = (Segmentation.Roi).fetch(\"mask\")\n",
"plt.imshow(masks[4])"
]
},
@@ -883,14 +889,18 @@
"source": [
"from skimage import io\n",
"import os\n",
+ "\n",
+ "\n",
"@schema\n",
- "class Fluorescence(dj.Imported): # imported table because it also rely on the external tiff file.\n",
+ "class Fluorescence(\n",
+ " dj.Imported\n",
+ "): # imported table because it also rely on the external tiff file.\n",
" definition = \"\"\"\n",
" -> Segmentation\n",
" ---\n",
" time : longblob # time for each frame\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Trace(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -898,43 +908,40 @@
" ---\n",
" trace : longblob # fluorescence trace of each ROI\n",
" \"\"\"\n",
- " \n",
+ "\n",
" # the master table is mainly to perform the computation, while the part table contains the result\n",
" def make(self, key):\n",
- " \n",
- " print('Populating: {}'.format(key))\n",
+ " print(\"Populating: {}\".format(key))\n",
" # fetch data directory from table Session\n",
- " data_path = (Session & key).fetch1('data_path')\n",
- " \n",
+ " data_path = (Session & key).fetch1(\"data_path\")\n",
+ "\n",
" # fetch data file name from table Scan\n",
- " file_name = (Scan & key).fetch1('file_name')\n",
- " \n",
+ " file_name = (Scan & key).fetch1(\"file_name\")\n",
+ "\n",
" # load the file\n",
" im = io.imread(os.path.join(data_path, file_name))\n",
- " \n",
+ "\n",
" # get dimensions of the image and reshape\n",
" n, w, h = np.shape(im)\n",
- " im_reshaped = np.reshape(im, [n, w*h])\n",
- " \n",
+ " im_reshaped = np.reshape(im, [n, w * h])\n",
+ "\n",
" # get frames per second to compute time\n",
- " fps = (Scan & key).fetch1('fps')\n",
- " \n",
+ " fps = (Scan & key).fetch1(\"fps\")\n",
+ "\n",
" # insert into master table first\n",
- " self.insert1(dict(**key, time=np.array(range(n))/fps))\n",
- " \n",
- " \n",
+ " self.insert1(dict(**key, time=np.array(range(n)) / fps))\n",
+ "\n",
" # extract traces\n",
- " roi_keys, masks = (Segmentation.Roi & key).fetch('KEY', 'mask')\n",
- " \n",
+ " roi_keys, masks = (Segmentation.Roi & key).fetch(\"KEY\", \"mask\")\n",
+ "\n",
" traces = []\n",
" for roi_key, mask in zip(roi_keys, masks):\n",
- " \n",
" # reshape mask\n",
- " mask_reshaped = np.reshape(mask, [w*h])\n",
+ " mask_reshaped = np.reshape(mask, [w * h])\n",
" trace = np.mean(im_reshaped[:, mask_reshaped], axis=1)\n",
- " \n",
+ "\n",
" traces.append(dict(**roi_key, trace=trace))\n",
- " \n",
+ "\n",
" self.Trace.insert(traces)"
]
},
@@ -944,7 +951,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - populate the Fluorescence table\n"
+ "# ENTER YOUR CODE! - populate the Fluorescence table"
]
},
{
diff --git a/tutorials/04-Electrophysiology Imported Tables.ipynb b/tutorials/04-Electrophysiology Imported Tables.ipynb
index 41823b7..5aa75c6 100644
--- a/tutorials/04-Electrophysiology Imported Tables.ipynb
+++ b/tutorials/04-Electrophysiology Imported Tables.ipynb
@@ -52,6 +52,7 @@
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
+ "\n",
"%matplotlib inline"
]
},
@@ -136,6 +137,7 @@
"outputs": [],
"source": [
"from tutorial_pipeline import data_dir\n",
+ "\n",
"data_dir"
]
},
@@ -166,7 +168,7 @@
"metadata": {},
"outputs": [],
"source": [
- "keys = Session.fetch('KEY')\n",
+ "keys = Session.fetch(\"KEY\")\n",
"keys"
]
},
@@ -183,7 +185,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - restrict session using keys and any element inside keys. \n"
+ "# ENTER YOUR CODE! - restrict session using keys and any element inside keys."
]
},
{
@@ -209,7 +211,9 @@
"metadata": {},
"outputs": [],
"source": [
- "filename = '{data_dir}/data_{mouse_id}_{session_date}.npy'.format(**key, data_dir=data_dir)\n",
+ "filename = \"{data_dir}/data_{mouse_id}_{session_date}.npy\".format(\n",
+ " **key, data_dir=data_dir\n",
+ ")\n",
"filename"
]
},
@@ -315,7 +319,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - plot a Diagram of the schema\n"
+ "# ENTER YOUR CODE! - plot a Diagram of the schema"
]
},
{
@@ -393,7 +397,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - call `populate` on the table\n"
+ "# ENTER YOUR CODE! - call `populate` on the table"
]
},
{
@@ -417,8 +421,9 @@
" ---\n",
" activity: longblob # electric activity of the neuron\n",
" \"\"\"\n",
- " def make(self, key): # `make` takes a single argument `key`\n",
- " print('key is', key)"
+ "\n",
+ " def make(self, key): # `make` takes a single argument `key`\n",
+ " print(\"key is\", key)"
]
},
{
@@ -434,7 +439,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - call `populate` on the table\n"
+ "# ENTER YOUR CODE! - call `populate` on the table"
]
},
{
@@ -497,24 +502,31 @@
" ---\n",
" activity: longblob # electric activity of the neuron\n",
" \"\"\"\n",
+ "\n",
" def make(self, key):\n",
" # use key dictionary to determine the data file path\n",
- " data_file = '{data_dir}/data_{mouse_id}_{session_date}.npy'.format(**key, data_dir=data_dir)\n",
+ " data_file = \"{data_dir}/data_{mouse_id}_{session_date}.npy\".format(\n",
+ " **key, data_dir=data_dir\n",
+ " )\n",
"\n",
" # load the data\n",
" data = np.load(data_file)\n",
- " \n",
+ "\n",
" for idx, d in enumerate(data):\n",
" # add the index of the 1st dimension as neuron_id\n",
- " key['neuron_id'] = idx\n",
- " \n",
+ " key[\"neuron_id\"] = idx\n",
+ "\n",
" # add the loaded data as the \"activity\" column\n",
- " key['activity'] = d\n",
+ " key[\"activity\"] = d\n",
"\n",
" # insert the key into self\n",
" self.insert1(key)\n",
"\n",
- " print('Populated neuron={neuron_id} for mouse_id={mouse_id} on session_date={session_date}'.format(**key))"
+ " print(\n",
+ " \"Populated neuron={neuron_id} for mouse_id={mouse_id} on session_date={session_date}\".format(\n",
+ " **key\n",
+ " )\n",
+ " )"
]
},
{
@@ -592,12 +604,14 @@
"metadata": {},
"outputs": [],
"source": [
- "Session.insert1({\n",
- " \"mouse_id\": 100,\n",
- " \"session_date\": \"2017-06-01\",\n",
- " \"experiment_setup\": 1,\n",
- " \"experimenter\": \"Jacob Reimer\"\n",
- "})"
+ "Session.insert1(\n",
+ " {\n",
+ " \"mouse_id\": 100,\n",
+ " \"session_date\": \"2017-06-01\",\n",
+ " \"experiment_setup\": 1,\n",
+ " \"experimenter\": \"Jacob Reimer\",\n",
+ " }\n",
+ ")"
]
},
{
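populate() derives its to-do list from the key source, here the Session entries without a matching Neuron row, so the session inserted above is picked up by the next call. A minimal sketch; restricting populate() by a key and suppress_errors are standard options, assumed here:

    # populate only the new session; failures are collected and skipped rather than raised
    Neuron.populate({"mouse_id": 100}, suppress_errors=True)
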
diff --git a/tutorials/05-Electrophysiology Computed Tables.ipynb b/tutorials/05-Electrophysiology Computed Tables.ipynb
index 06d9fd9..6693053 100644
--- a/tutorials/05-Electrophysiology Computed Tables.ipynb
+++ b/tutorials/05-Electrophysiology Computed Tables.ipynb
@@ -53,6 +53,7 @@
"source": [
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
+ "\n",
"%matplotlib inline"
]
},
@@ -149,7 +150,7 @@
"metadata": {},
"outputs": [],
"source": [
- "keys = Neuron().fetch('KEY')\n",
+ "keys = Neuron().fetch(\"KEY\")\n",
"\n",
"# pick one key\n",
"key = keys[0]"
@@ -177,7 +178,7 @@
"metadata": {},
"outputs": [],
"source": [
- "activity = (Neuron() & key).fetch('activity')"
+ "activity = (Neuron() & key).fetch(\"activity\")"
]
},
{
@@ -218,7 +219,7 @@
"metadata": {},
"outputs": [],
"source": [
- "activity = (Neuron() & key).fetch1('activity')"
+ "activity = (Neuron() & key).fetch1(\"activity\")"
]
},
{
@@ -243,7 +244,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - compute the mean of the activity\n"
+ "# ENTER YOUR CODE! - compute the mean of the activity"
]
},
{
@@ -252,7 +253,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - compute the standard deviation of the activity\n"
+ "# ENTER YOUR CODE! - compute the standard deviation of the activity"
]
},
{
@@ -261,7 +262,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - compute the maximum of the activity\n"
+ "# ENTER YOUR CODE! - compute the maximum of the activity"
]
},
{
@@ -416,7 +417,7 @@
"outputs": [],
"source": [
"# get all keys\n",
- "keys = Neuron.fetch('KEY')"
+ "keys = Neuron.fetch(\"KEY\")"
]
},
{
@@ -426,7 +427,7 @@
"outputs": [],
"source": [
"# fetch all activities - returned as NumPy array of NumPy arrays\n",
- "activities = (Neuron & keys).fetch('activity')"
+ "activities = (Neuron & keys).fetch(\"activity\")"
]
},
{
@@ -438,8 +439,8 @@
"fig, axs = plt.subplots(1, len(activities), figsize=(16, 4))\n",
"for activity, ax in zip(activities, axs.ravel()):\n",
" ax.plot(activity)\n",
- " ax.set_xlabel('Time')\n",
- " ax.set_ylabel('Activity')\n",
+ " ax.set_xlabel(\"Time\")\n",
+ " ax.set_ylabel(\"Activity\")\n",
"\n",
"fig.tight_layout()"
]
@@ -466,7 +467,7 @@
"metadata": {},
"outputs": [],
"source": [
- "activity = (Neuron & key).fetch1('activity')"
+ "activity = (Neuron & key).fetch1(\"activity\")"
]
},
{
@@ -476,8 +477,8 @@
"outputs": [],
"source": [
"plt.plot(activity)\n",
- "plt.xlabel('Time')\n",
- "plt.ylabel('Activity')\n",
+ "plt.xlabel(\"Time\")\n",
+ "plt.ylabel(\"Activity\")\n",
"plt.xlim([0, 300])"
]
},
@@ -497,11 +498,11 @@
"threshold = 0.5\n",
"\n",
"# find activity above threshold\n",
- "above_thrs = (activity > threshold).astype(int) \n",
+ "above_thrs = (activity > threshold).astype(int)\n",
"\n",
"plt.plot(activity)\n",
"plt.plot(above_thrs)\n",
- "plt.xlabel('Time')\n",
+ "plt.xlabel(\"Time\")\n",
"plt.xlim([0, 300])"
]
},
@@ -518,13 +519,14 @@
"metadata": {},
"outputs": [],
"source": [
- "rising = (np.diff(above_thrs) > 0).astype(int) # find rising edge of crossing threshold\n",
- "spikes = np.hstack((0, rising)) # prepend 0 to account for shortening due to np.diff\n",
+ "rising = (np.diff(above_thrs) > 0).astype(int) # find rising edge of crossing threshold\n",
+ "spikes = np.hstack((0, rising)) # prepend 0 to account for shortening due to np.diff\n",
"\n",
"plt.plot(activity)\n",
"plt.plot(above_thrs)\n",
- "plt.plot(np.where(spikes>0), 1, 'ro'); # plot only spike points\n",
- "plt.xlabel('Time')\n",
+ "plt.plot(np.where(spikes > 0), 1, \"ro\")\n",
+ "# plot only spike points\n",
+ "plt.xlabel(\"Time\")\n",
"plt.xlim([0, 300])"
]
},
@@ -541,7 +543,7 @@
"metadata": {},
"outputs": [],
"source": [
- "count = spikes.sum() # compute total spike counts\n",
+ "count = spikes.sum() # compute total spike counts\n",
"count"
]
},
@@ -692,7 +694,7 @@
" spikes: longblob # detected spikes\n",
" count: int # total number of detected spikes\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Waveform(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -740,7 +742,7 @@
" spikes: longblob # detected spikes\n",
" count: int # total number of detected spikes\n",
" \"\"\"\n",
- " \n",
+ "\n",
" class Waveform(dj.Part):\n",
" definition = \"\"\"\n",
" -> master\n",
@@ -748,33 +750,39 @@
" ---\n",
" waveform: longblob # waveform extracted from this spike\n",
" \"\"\"\n",
- " \n",
+ "\n",
" def make(self, key):\n",
- " print('Populating for: ', key)\n",
+ " print(\"Populating for: \", key)\n",
"\n",
- " activity = (Neuron() & key).fetch1('activity')\n",
- " threshold = (SpikeDetectionParam() & key).fetch1('threshold')\n",
+ " activity = (Neuron() & key).fetch1(\"activity\")\n",
+ " threshold = (SpikeDetectionParam() & key).fetch1(\"threshold\")\n",
"\n",
- " above_thrs = (activity > threshold).astype(int) # find activity above threshold\n",
- " rising = (np.diff(above_thrs) > 0).astype(int) # find rising edge of crossing threshold\n",
- " spikes = np.hstack((0, rising)) # prepend 0 to account for shortening due to np.diff\n",
+ " above_thrs = (activity > threshold).astype(int) # find activity above threshold\n",
+ " rising = (np.diff(above_thrs) > 0).astype(\n",
+ " int\n",
+ " ) # find rising edge of crossing threshold\n",
+ " spikes = np.hstack(\n",
+ " (0, rising)\n",
+ " ) # prepend 0 to account for shortening due to np.diff\n",
"\n",
- " count = spikes.sum() # compute total spike counts\n",
- " print('Detected {} spikes!\\n'.format(count))\n",
+ " count = spikes.sum() # compute total spike counts\n",
+ " print(\"Detected {} spikes!\\n\".format(count))\n",
"\n",
" # create and insert a new dictionary containing `key` and additionally `spikes` and `count`\n",
- " self.insert1(dict(key, spikes=spikes, count=count)) \n",
- " \n",
+ " self.insert1(dict(key, spikes=spikes, count=count))\n",
+ "\n",
" # extract waveform for the `Waveform` part-table\n",
- " before_spk, after_spk = 40, 40 # extract 40 sample points before and after a spike as the waveform\n",
- " for spk_id, spk in enumerate(np.where(spikes==1)[0]):\n",
- " \n",
+ " before_spk, after_spk = (\n",
+ " 40,\n",
+ " 40,\n",
+ " ) # extract 40 sample points before and after a spike as the waveform\n",
+ " for spk_id, spk in enumerate(np.where(spikes == 1)[0]):\n",
" # For simplicity, skip the spikes too close to the beginning or the end\n",
- " if spk - before_spk < 0 or spk + after_spk > len(activity) + 1: \n",
+ " if spk - before_spk < 0 or spk + after_spk > len(activity) + 1:\n",
" continue\n",
- " \n",
- " wf = activity[spk - before_spk: spk + after_spk] \n",
- " \n",
+ "\n",
+ " wf = activity[spk - before_spk : spk + after_spk]\n",
+ "\n",
" # create and insert a new dictionary containing `key` and additionally `spike_id` and `waveform`\n",
" self.Waveform.insert1(dict(key, spike_id=spk_id, waveform=wf))"
]
@@ -822,7 +830,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - populate the Spikes table\n"
+ "# ENTER YOUR CODE! - populate the Spikes table"
]
},
{
@@ -886,7 +894,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - populate the Spikes table for real!\n"
+ "# ENTER YOUR CODE! - populate the Spikes table for real!"
]
},
{
@@ -918,7 +926,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - Now, build a query for the waveforms from mouse 100, session on \"2017-05-25\", with detection param 0\n"
+ "# ENTER YOUR CODE! - Now, build a query for the waveforms from mouse 100, session on \"2017-05-25\", with detection param 0"
]
},
{
@@ -927,7 +935,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - try fetching all the waveforms\n"
+ "# ENTER YOUR CODE! - try fetching all the waveforms"
]
},
{
@@ -936,7 +944,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - and plot the average waveform\n"
+ "# ENTER YOUR CODE! - and plot the average waveform"
]
},
{
@@ -977,7 +985,7 @@
"metadata": {},
"outputs": [],
"source": [
- "# ENTER YOUR CODE! - populate the \"missing\" entry in Spikes table\n"
+ "# ENTER YOUR CODE! - populate the \"missing\" entry in Spikes table"
]
},
{
@@ -1023,7 +1031,7 @@
"metadata": {},
"outputs": [],
"source": [
- "(Spikes & 'sdp_id = 0').delete()"
+ "(Spikes & \"sdp_id = 0\").delete()"
]
},
{
@@ -1039,7 +1047,7 @@
"metadata": {},
"outputs": [],
"source": [
- "SpikeDetectionParam() & 'sdp_id = 0'"
+ "SpikeDetectionParam() & \"sdp_id = 0\""
]
},
{
@@ -1048,7 +1056,7 @@
"metadata": {},
"outputs": [],
"source": [
- "(SpikeDetectionParam() & 'sdp_id = 0').delete()"
+ "(SpikeDetectionParam() & \"sdp_id = 0\").delete()"
]
},
{