cv2.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm, uniform
from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import cross_val_score, KFold
from sklearn.preprocessing import PolynomialFeatures
# Generate synthetic data: y = sin(x) + Gaussian noise, with x ~ Uniform(-4, 4)
np.random.seed(2024)
x = uniform.rvs(size=30, loc=-4, scale=8)
y = np.sin(x) + norm.rvs(size=30, loc=0, scale=0.3)
# Put x into a DataFrame and expand it into polynomial features x^1 ... x^20
x_vars = np.char.add('x', np.arange(1, 21).astype(str))
X = pd.DataFrame(x, columns=['x'])
poly = PolynomialFeatures(degree=20, include_bias=False)
X_poly = pd.DataFrame(
    data=poly.fit_transform(X),
    columns=x_vars
)
# 5-fold cross-validation setup
kf = KFold(n_splits=5, shuffle=True, random_state=2024)

def rmse(model):
    """Return the cross-validated RMSE of `model` on the polynomial features."""
    score = np.sqrt(-cross_val_score(model, X_poly, y, cv=kf,
                                     n_jobs=-1, scoring="neg_mean_squared_error").mean())
    return score

lasso = Lasso(alpha=0.01)
ridge = Ridge(alpha=0.01)
print("Lasso (alpha=0.01) CV RMSE:", rmse(lasso))
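# Illustrative extra check: the same helper can score the Ridge model defined
# above, giving a quick L2-vs-L1 comparison at alpha=0.01. This line is a
# sketch added for context and is not needed by the lambda sweep below.
print("Ridge (alpha=0.01) CV RMSE:", rmse(ridge))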
# Cross-validated RMSE for each candidate alpha (note: at alpha=0, Lasso reduces
# to unregularized least squares and scikit-learn emits a convergence warning)
alpha_values = np.arange(0, 10, 0.01)
mean_scores = np.zeros(len(alpha_values))
for k, alpha in enumerate(alpha_values):
    lasso = Lasso(alpha=alpha)
    mean_scores[k] = rmse(lasso)
# Collect the results in a DataFrame
df = pd.DataFrame({
    'lambda': alpha_values,
    'validation_error': mean_scores
})
print(df)
# Plot validation error against lambda
plt.plot(df['lambda'], df['validation_error'], label='Validation Error', color='red')
plt.xlabel('Lambda')
plt.ylabel('CV RMSE')
plt.legend()
plt.title('Lasso Regression: Validation Error vs Lambda')
plt.show()
# Find the alpha (lambda) with the smallest cross-validated error
optimal_alpha = df.loc[df['validation_error'].idxmin(), 'lambda']
print("Optimal lambda:", optimal_alpha)