# PoisoningAttackSVM.py — demo of an SVM data-poisoning attack using IBM ART.
import numpy as np

from sklearn import datasets
from sklearn import svm
from sklearn.model_selection import train_test_split

from art.attacks.poisoning import PoisoningAttackSVM
from art.estimators.classification import SklearnClassifier
# Load an example dataset and split it into train/test sets.
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42
)

# Train a baseline SVM classifier on the clean data.
model = svm.SVC()
model.fit(X_train, y_train)

# Wrap the fitted sklearn model in an ART classifier.
art_classifier = SklearnClassifier(model=model)

# Build the poisoning attack. ART's PoisoningAttackSVM takes the wrapped
# model via the `classifier` keyword (the original `estimator=` keyword
# raises TypeError), and it also needs the clean training split — not just
# the validation split — to compute the attack gradient.
# NOTE(review): this attack implements Biggio et al.'s SVM poisoning, which
# is formulated for binary classification; iris has 3 classes — confirm the
# label format/class setup ART expects before relying on the result.
attack = PoisoningAttackSVM(
    classifier=art_classifier,
    step=0.1,
    eps=0.1,
    x_train=X_train,
    y_train=y_train,
    x_val=X_test,
    y_val=y_test,
)

# Generate poisoned training examples from the clean training data.
X_train_adv, y_train_adv = attack.poison(x=X_train, y=y_train)

# Retrain the model on the poisoned data.
model.fit(X_train_adv, y_train_adv)

# Measure accuracy on the original, clean test set (requires numpy).
predictions = model.predict(X_test)
accuracy = np.sum(predictions == y_test) / len(y_test)  # fraction of correct predictions
print(f'Accuracy on original test examples after poisoning: {accuracy * 100}%')