
Commit 6fc3877
added inference
GieziJo committed Jun 9, 2023
1 parent 7f9468a commit 6fc3877
Showing 6 changed files with 892 additions and 1,653 deletions.
@@ -110,19 +110,28 @@
{
"cell_type": "code",
"execution_count": 8,
"id": "regional-orientation",
"id": "1c1b20fe-6d44-44dc-abca-3f540c0acfc6",
"metadata": {},
"outputs": [],
"source": [
"import importlib"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "622643a9-5863-4cb4-a9ae-f8bb97821b51",
"metadata": {},
"outputs": [],
"source": [
"import importlib\n",
"import Helpers.MODIS8DaysHelper as mh\n",
"import Helpers.GEEHelpers as GEEHelpers\n",
"import Helpers.StaticFeaturesHelper as StaticFeaturesHelper"
]
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 10,
"id": "3328d6d7-f636-4293-a08e-9a9fd095137c",
"metadata": {},
"outputs": [
@@ -132,7 +141,7 @@
"<module 'Helpers.StaticFeaturesHelper' from '/home/jgiezendanner/UA/cvpr23-earthvision-CNN-LSTM-Inundation/Source/Helpers/StaticFeaturesHelper.py'>"
]
},
"execution_count": 9,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -143,6 +152,24 @@
"importlib.reload(StaticFeaturesHelper)"
]
},
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "6478233c-514d-4979-b311-f012570a82d4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from ModelClasses.Model import CNNLSTM as CNNLSTM"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0e8482fb-145a-4279-99e9-77200734b12e",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
{
"cell_type": "markdown",
"id": "2bf89542-32bd-4c7b-93b9-7e7d2e077dfa",
@@ -153,31 +180,41 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 28,
"id": "098dbc28-c469-43e3-a0c4-d23761faffe8",
"metadata": {},
"outputs": [],
"source": [
"dataPath = Path('../../Data/')"
"dataPath = Path('../../Data/ModelData/Data')"
]
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 29,
"id": "064b639c-fb0f-467f-97c6-4f65140b27b2",
"metadata": {},
"outputs": [],
"source": [
"lstmDF = pd.read_json(dataPath/'lstmFiles.json') # dataframe for LSTM with corresponding data"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "7bb6688b-7ed1-4f67-9403-b3f35107fc45",
+ "metadata": {},
+ "source": [
+ "## Define number of time steps for LSTM"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
"id": "dd5a72cb-5052-4bd6-aa20-0fb0c18a0629",
"execution_count": 30,
"id": "87963ade-b336-4521-98f7-004c6af18b94",
"metadata": {},
"outputs": [],
"source": []
"source": [
"timeSteps = 10"
]
},
{
"cell_type": "markdown",
@@ -189,34 +226,33 @@
},
{
"cell_type": "code",
"execution_count": 12,
"id": "bbfdc095-58b4-439a-9d01-bd9d01d3dbd9",
"execution_count": 31,
"id": "1540353d-c714-4e16-a17a-9407a983b02a",
"metadata": {},
"outputs": [],
"source": [
"def getStaticFeaturesFromLabel(filePath):\n",
" elevation = np.expand_dims(StaticFeaturesHelper.getScaledElevation(filePath.parent.parent/'Elevation'/('_'.join(filePath.stem.split('_')[0:2]) + '.tif')), 0)\n",
" slopeFile = np.expand_dims(StaticFeaturesHelper.getScaledHAND(filePath.parent.parent/'Slope'/('_'.join(filePath.stem.split('_')[0:2]) + '.tif')), 0)\n",
" hand = np.expand_dims(StaticFeaturesHelper.getSlope(filePath.parent.parent/'HAND'/('_'.join(filePath.stem.split('_')[0:2]) + '.tif')), 0)\n",
" return np.concatenate((elevation, slopeFile, hand))\n",
" "
"def getModisFileFromLabel(filePath):\n",
" fileDir = filePath.parent.parent/\"MOD09A1.061\"\n",
" return [fileDir/item for item in lstmDF[lstmDF.File == filePath.name].FeatureFiles.values[0]]"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "1540353d-c714-4e16-a17a-9407a983b02a",
"execution_count": 32,
"id": "bbfdc095-58b4-439a-9d01-bd9d01d3dbd9",
"metadata": {},
"outputs": [],
"source": [
"def getModisFileFromLabel(filePath):\n",
" fileDir = filePath.parent.parent/\"MOD09A1.061\"\n",
" return [fileDir/item for item in lstmDF[lstmDF.File == filePath.name].FeatureFiles.values[0]]"
"def getStaticFeaturesFromLabel(filePath):\n",
" elevation = np.expand_dims(StaticFeaturesHelper.getScaledElevation(filePath.parent.parent/'Elevation'/('_'.join(filePath.stem.split('_')[0:2]) + '.tif')), 0)\n",
" slopeFile = np.expand_dims(StaticFeaturesHelper.getScaledHAND(filePath.parent.parent/'Slope'/('_'.join(filePath.stem.split('_')[0:2]) + '.tif')), 0)\n",
" hand = np.expand_dims(StaticFeaturesHelper.getSlope(filePath.parent.parent/'HAND'/('_'.join(filePath.stem.split('_')[0:2]) + '.tif')), 0)\n",
" return np.concatenate((elevation, slopeFile, hand))"
]
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 33,
"id": "db502cd6-576b-4b35-bf5f-b006efeaa478",
"metadata": {},
"outputs": [],
@@ -227,12 +263,11 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 34,
"id": "9634c24d-05bf-48b0-a8d1-570593e5c24c",
"metadata": {},
"outputs": [],
"source": [
"timeSteps = 10\n",
"# Open MODIS files and indices\n",
"def open_features(fn, chnls=None):\n",
" # Stack MODIS time steps\n",
@@ -274,7 +309,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 35,
"id": "df48333e-f04b-41e3-80c1-24427b895a56",
"metadata": {},
"outputs": [],
@@ -331,7 +366,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 36,
"id": "3e8901c6-1bde-487b-bb21-829601cf2ffb",
"metadata": {},
"outputs": [],
@@ -351,7 +386,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 37,
"id": "00a2c0f3-9661-49a8-bb9d-9ca901f0e304",
"metadata": {},
"outputs": [],
@@ -373,7 +408,7 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 38,
"id": "imported-transcription",
"metadata": {},
"outputs": [],
@@ -384,7 +419,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 39,
"id": "15fccd7e-4dde-4b14-8ed0-d3cb08983437",
"metadata": {},
"outputs": [],
@@ -401,90 +436,6 @@
"outputs": [],
"source": []
},
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "112ee282-6f7f-4268-a331-1d957106795e",
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "markdown",
- "id": "ed29d63b-d46c-467b-8f51-6f74eee01664",
- "metadata": {},
- "source": [
- "## Define model"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 21,
- "id": "7f55afd3-04ca-4826-b93e-8a37fdb13c97",
- "metadata": {},
- "outputs": [],
- "source": [
- "class Net(nn.Module):\n",
- "    def __init__(self, nbFeatures=10, initSize=32, nbLayers=1, nbTimeSteps=timeSteps, input_size=32*32, hidden_size=32*32):\n",
- "        super().__init__()\n",
- "        \n",
- "        self.nbFeatures = nbFeatures\n",
- "        self.input_size = input_size\n",
- "        self.nbTimeSteps = nbTimeSteps\n",
- "        \n",
- "        # define helper functions for convolutions, either single convolution, or combined with res block\n",
- "        def conv2_single(ni,nf): return nn.Conv2d(ni, nf, kernel_size=3, padding=1, padding_mode='reflect', stride=1)\n",
- "        def conv2(ni,nf): return nn.Conv2d(ni, nf, groups=nbTimeSteps, kernel_size=3, padding=1, padding_mode='reflect', stride=1)\n",
- "        def conv2_and_res(ni, nf): return nn.Sequential(conv2(ni,nf), ResBlock(2, ni, nf, groups=nbTimeSteps, stride=1))\n",
- "        \n",
- "        # Create CNN A\n",
- "        # note that groups are defined by number of time steps, i.e. each time step is applied the same CNN separatly\n",
- "        self.cnn = nn.Sequential(\n",
- "            conv2(nbFeatures*nbTimeSteps,nbTimeSteps*initSize)\n",
- "        )\n",
- "        \n",
- "        for k in range(nbLayers):\n",
- "            self.cnn = self.cnn.append(conv2_and_res(nbTimeSteps*initSize * 4**k, nbTimeSteps*(initSize * 2) * 4**k))\n",
- "        self.cnn = self.cnn.append(conv2(nbTimeSteps*(initSize * 2) * 4**(nbLayers-1)* 2,nbTimeSteps*1))\n",
- "        \n",
- "        # Create LSTM\n",
- "        self.LSTM = nn.LSTM(input_size=input_size, hidden_size=hidden_size, num_layers=1, batch_first=True, bidirectional=False)\n",
- "        \n",
- "        # Create transpose convolution\n",
- "        self.convTrans = nn.ConvTranspose2d(1024,1,kernel_size=32)\n",
- "        \n",
- "        # Set the output to be a single convolution and a sigmoid\n",
- "        self.outLayer = nn.Sequential(conv2_single(2,1), SigmoidRange(0,1))\n",
- "\n",
- "    def forward(self, x):\n",
- "        # get size of problem\n",
- "        batchSize = x.shape[0]\n",
- "        imgSize = x.shape[2:4]\n",
- "        \n",
- "        # pass all time steps through the CNN\n",
- "        x = self.cnn(x)\n",
- "        # extract time step 0\n",
- "        x_now = x[:,0,::].view((batchSize,1,imgSize[0],imgSize[1]))\n",
- "        # pass time step -1 to -9 through lstm\n",
- "        x, (_,_) = self.LSTM(x[:,1:,::].view((batchSize,self.nbTimeSteps-1,-1)))\n",
- "        # extract result and pass through transpose convolution\n",
- "        x = x[:,-1,:].view((batchSize,-1,1,1))\n",
- "        x = self.convTrans(x)\n",
- "        # concatenate lstm output and time step t\n",
- "        x = torch.cat((x_now,x),1)\n",
- "        # pass output through output layer\n",
- "        x = self.outLayer(x)\n",
- "        return x"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "59775b97-9443-4f0e-b68c-9ca4498a3ad9",
- "metadata": {},
- "outputs": [],
- "source": []
- },
{
"cell_type": "markdown",
"id": "05085e75-de2a-4cbf-8be7-612c3be32d9e",
@@ -495,7 +446,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 40,
"id": "a6ae714d-6657-4ef6-a160-c59c3c661bcf",
"metadata": {},
"outputs": [],
@@ -505,7 +456,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 41,
"id": "62aaf0b2-baaf-4dad-8b3a-537ad25050b0",
"metadata": {},
"outputs": [],
@@ -544,7 +495,7 @@
" loss_fn = MSELossFlat()\n",
"\n",
" # create model\n",
" model = Net()\n",
" model = CNNLSTM(nbTimeSteps = timeSteps)\n",
"\n",
" # create learner\n",
" learn = Learner(dl, model, loss_func = loss_fn, metrics=acc_metric, opt_func=ranger, cbs=CSVLogger(append=True, fname='history_' + str(leaveOutYear) + '.csv'))\n",
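For orientation, here is a minimal sketch (not part of the commit) of how the inference pieces introduced in this diff fit together. The imports, the data path, the lstmFiles.json columns, and the CNNLSTM(nbTimeSteps=timeSteps) call mirror the cells shown above; the region and label folder names and the example file name are made-up placeholders for illustration.

from pathlib import Path
import pandas as pd
from ModelClasses.Model import CNNLSTM

# New data root used in this commit
dataPath = Path('../../Data/ModelData/Data')
# Data frame mapping each label file to its MODIS feature files
lstmDF = pd.read_json(dataPath / 'lstmFiles.json')

# Number of LSTM time steps now lives in its own cell
timeSteps = 10
# The model is imported from ModelClasses.Model instead of being defined inline
model = CNNLSTM(nbTimeSteps=timeSteps)

# Hypothetical example of resolving the MODIS stack for one label file,
# following getModisFileFromLabel above; the path below is a placeholder.
labelFile = dataPath / 'SomeRegion' / 'Labels' / 'example_label.tif'
featureFiles = [labelFile.parent.parent / 'MOD09A1.061' / item
                for item in lstmDF[lstmDF.File == labelFile.name].FeatureFiles.values[0]]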