-
Notifications
You must be signed in to change notification settings - Fork 0
/
run.py
127 lines (104 loc) · 4.04 KB
/
run.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
# -*- coding: utf-8 -*-
"""
RUN Performs an LSTM machine learning on the given data.
Created by : newbi
Created on Sat Dec 12 12:14:00 2020
Modified on : 12.12.2020
Based on : ML class at EPFL
Info :
UPDATE :
1.
"""
import pandas as pd
from MK_run import mk_predict
from LSTM_run import lstm_predict
from RF_run import rf_predict
from TCN_run import tcn_predict
from Helpers.p_indicators import p_inds
from Helpers.performance_comparison import perf_comp
def run_models(model, history_window=10, hyperparam_opt=False,
               results_file='./Data/Results.csv'):
    """RUN_MODELS Runs the selected model with the given settings.

    Runs the selected model, computes classification metrics, updates the
    matching row of the results CSV, and regenerates comparison figures.

    Parameters
    ----------
    model : str
        Which model to run. Either "MK", "LSTM", "RF" or "TCN"
        (case-insensitive).
    history_window : int, optional
        How many days to consider in memory. Default 10.
    hyperparam_opt : bool, optional
        Whether to perform a hyperparameter optimization. Default False.
    results_file : str, optional
        Path to the CSV file holding per-model results.

    Returns
    -------
    None.

    Raises
    ------
    ValueError
        If `model` is not one of the supported model names.
    """
    model_name = model.upper()

    # Fail fast on an unknown model. The original code only printed a
    # warning and then crashed with a NameError on the undefined y_pred.
    if model_name not in ("MK", "LSTM", "RF", "TCN"):
        raise ValueError("Unknown model chosen: {}".format(model))

    print("Performing prediction with {} model.".format(model_name))
    if model_name == "MK":
        y_pred, y_true = mk_predict()
    elif model_name == "LSTM":
        y_pred, y_true = lstm_predict(hyperparam_opt, history_window)
    elif model_name == "RF":
        # NOTE: rf_predict takes (history_window, hyperparam_opt), the
        # reverse order of lstm/tcn — preserved from the original call.
        y_pred, y_true = rf_predict(history_window, hyperparam_opt)
    else:  # "TCN"
        y_pred, y_true = tcn_predict(hyperparam_opt, history_window)

    # get the performance indicators
    print("Calculating performance indicators.")
    class_dict = p_inds(y_true, y_pred)

    # load previous indicators and locate the row for this model/memory pair
    df = pd.read_csv(results_file)
    row_idx = ((df['Model'] == model_name)
               & (df['Memory'] == history_window))

    # update the matching row with the new metrics
    df.loc[row_idx, 'Model'] = model_name
    df.loc[row_idx, 'Memory'] = history_window
    df.loc[row_idx, 'accuracy'] = class_dict['accuracy']
    df.loc[row_idx, 'precision'] = class_dict['weighted avg']['precision']
    df.loc[row_idx, 'f1'] = class_dict['weighted avg']['f1-score']
    df.loc[row_idx, 'recall'] = class_dict['weighted avg']['recall']

    # save the data and regenerate the comparison figures
    df.to_csv(results_file, index=False)
    perf_comp(df)
if __name__ == "__main__":
# initialize variables
model = ""
hyperparam_opt = False
history_window = 1
# set list of valid models
valid_models = ['MK', 'LSTM', 'RF','TCN']
# find which model to use
while model.upper() not in valid_models:
model = input("Please choose a model:\n Choices are 'MK', 'LSTM', 'RF','TCN'\n")
print("The {} model will be used.".format(model.upper()))
# if not using Markov, ask the other parameters
if model.upper() != "MK":
hyperparam_opt = input("Would you like to perform hyperparameter tuning (1 for true/0 for false)?\n")
history_window = input("How long would you like the history window to be (days)?\nMust be 2, 10, 50, 100 or 150 if no tuning is done.\n")
# sanitize user input for hyperparam
if hyperparam_opt == "1":
print("Hyperparameter optimization will be performed.")
hyperparam_opt = True
else:
print("Hyperparameter optimization will NOT be performed.")
hyperparam_opt = False
# sanitize user input for history window
try:
if int(history_window) >= 1 & int(history_window) <= 150:
print("History window will be set to {} days.".format(history_window))
history_window = int(history_window)
else:
print("Invalid history window chosen. History window will be set to 1 day.")
history_window = 1
except:
print("Invalid history window chosen. History window will be set to 1 day.")
history_window = 1
# run the model
run_models(model, history_window, hyperparam_opt)