# -*- coding: utf-8 -*-
"""Creditcard.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1E-ieDz5Lp1SFGgFoEYwuQQeeUwD5IbwT
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import VotingClassifier, GradientBoostingClassifier, StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
import xgboost as xgb
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier, AdaBoostClassifier
from sklearn.metrics import confusion_matrix, classification_report, f1_score, accuracy_score
from sklearn import metrics
import warnings
warnings.filterwarnings("ignore")
# Load the data into a pandas dataframe
data = pd.read_csv('credit_card_data.csv')
# Drop rows with missing values
data = data.dropna()
# Separate the feature columns from the label (assumed to be the last column)
features = data.iloc[:, 0:-1]
labels = data.iloc[:, -1]
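# Optional sanity check (not in the original script): fraud datasets are
# typically highly imbalanced, which makes plain accuracy misleading. This
# assumes the last column holds a 0/1 fraud label.
print("Class distribution:\n", labels.value_counts(normalize=True))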
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2, random_state=42)
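# If the classes are imbalanced, a stratified split keeps the train/test label
# ratios comparable. Optional variant, not part of the original run:
# X_train, X_test, y_train, y_test = train_test_split(
#     features, labels, test_size=0.2, random_state=42, stratify=labels)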
# Define the individual models
model1 = LogisticRegression(max_iter=1000, random_state=64)
model2 = DecisionTreeClassifier(random_state=40)
model3 = RandomForestClassifier(random_state=100)
model4 = SVC(kernel='rbf', probability=True, random_state=100)
model5 = GaussianNB()
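# LogisticRegression and SVC are sensitive to feature scale; wrapping them in a
# scaling pipeline is a common variant. Sketch only, assuming all features are
# numeric (uncomment to try):
# from sklearn.preprocessing import StandardScaler
# from sklearn.pipeline import make_pipeline
# model1 = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000, random_state=64))
# model4 = make_pipeline(StandardScaler(), SVC(kernel='rbf', probability=True, random_state=100))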
# Choose the ensemble model (alternative approaches tried are left commented out)
#ensemble = RandomForestClassifier(random_state= 100)
#ensemble = VotingClassifier(estimators=[('lr', model1), ('dt', model2), ('rf', model3)], voting='hard')
#ensemble = VotingClassifier(estimators=[('lr', model1), ('dt', model2)], voting='soft')
#ensemble = GradientBoostingClassifier(n_estimators=400, learning_rate=0.05, random_state=100, max_features=5 )
#ensemble = BaggingClassifier(base_estimator = model3, n_estimators = 200, random_state = 90)
#ensemble = AdaBoostClassifier(RandomForestClassifier(random_state=90), n_estimators=200)
#ensemble = StackingClassifier(estimators=[('lr', model1), ('dt', model2)])
#ensemble = SVC(kernel='linear', probability=False, decision_function_shape='ovr', random_state=100)
ensemble = xgb.XGBClassifier(n_estimators=100, max_depth=3, learning_rate=0.1)
#ensemble = GaussianNB()
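# To compare the commented-out candidates more systematically than on a single
# test split, 5-fold cross-validation on the training data is one option
# (sketch, assuming a binary 0/1 label so the 'f1' scorer applies):
# from sklearn.model_selection import cross_val_score
# for name, candidate in [('lr', model1), ('rf', model3), ('xgb', ensemble)]:
#     scores = cross_val_score(candidate, X_train, y_train, cv=5, scoring='f1')
#     print(name, "mean F1:", scores.mean())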
# Fit the chosen ensemble on the training data
ensemble.fit(X_train, y_train)
# Use the fitted model to make predictions on the test data
y_pred = ensemble.predict(X_test)
# Evaluate the performance of the model using various metrics
#print("Confusion matrix:\n", confusion_matrix(y_test, y_pred))
#print("\nClassification report:\n", classification_report(y_test, y_pred))
#print("\nNumber of anomalies detected:", sum(y_pred))
print("\nAccuracy score:", accuracy_score(y_test, y_pred))
# Calculate precision and recall
precision = metrics.precision_score(y_test, y_pred)
recall = metrics.recall_score(y_test, y_pred)
print("Precision:", precision)
print("Recall:", recall)
print("F1 score:", f1_score(y_test, y_pred))