Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[pre-commit.ci] pre-commit autoupdate #312

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ repos:

# Ruff, the Python auto-correcting linter/formatter written in Rust
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.6.9
rev: v0.9.1
hooks:
- id: ruff
args: ["--fix", "--show-fixes"]
Expand Down
3 changes: 1 addition & 2 deletions examples/user_stories/generate_disclosure_risk_report.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,7 @@
config = yaml.load(handle, Loader=yaml.loader.SafeLoader)
except AttributeError as error: # pragma:no cover
print(
"Invalid command. Try --help to get more details"
f"error message is {error}"
f"Invalid command. Try --help to get more details. Error message is {error}"
)

user_story = config["user_story"]
Expand Down
3 changes: 1 addition & 2 deletions examples/user_stories/user_story_1/user_story_1_tre.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,8 +69,7 @@ def run_user_story(release_config: dict):
config = yaml.load(handle, Loader=yaml.loader.SafeLoader)
except AttributeError as error:
print(
"Invalid command. Try --help to get more details"
f"error message is {error}"
f"Invalid command. Try --help to get more details. Error message is {error}"
)

run_user_story(config)
3 changes: 1 addition & 2 deletions examples/user_stories/user_story_2/user_story_2_tre.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,8 +151,7 @@ def run_user_story(release_config: dict):
config = yaml.load(handle, Loader=yaml.loader.SafeLoader)
except AttributeError as error:
print(
"Invalid command. Try --help to get more details"
f"error message is {error}"
f"Invalid command. Try --help to get more details. Error message is {error}"
)

run_user_story(config)
3 changes: 1 addition & 2 deletions examples/user_stories/user_story_3/user_story_3_tre.py
Original file line number Diff line number Diff line change
Expand Up @@ -156,8 +156,7 @@ def run_user_story(release_config: dict):
config = yaml.load(handle, Loader=yaml.loader.SafeLoader)
except AttributeError as error:
print(
"Invalid command. Try --help to get more details"
f"error message is {error}"
f"Invalid command. Try --help to get more details. Error message is {error}"
)

run_user_story(config)
3 changes: 1 addition & 2 deletions examples/user_stories/user_story_4/user_story_4_tre.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,8 +120,7 @@ def run_user_story(release_config: dict):
config = yaml.load(handle, Loader=yaml.loader.SafeLoader)
except AttributeError as error:
print(
"Invalid command. Try --help to get more details"
f"error message is {error}"
f"Invalid command. Try --help to get more details. Error message is {error}"
)

run_user_story(config)
3 changes: 1 addition & 2 deletions examples/user_stories/user_story_7/user_story_7_tre.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,7 @@ def run_user_story(release_config: dict):
config = yaml.load(handle, Loader=yaml.loader.SafeLoader)
except AttributeError as error:
print(
"Invalid command. Try --help to get more details"
f"error message is {error}"
f"Invalid command. Try --help to get more details. Error message is {error}"
)

run_user_story(config)
3 changes: 1 addition & 2 deletions examples/user_stories/user_story_8/user_story_8_tre.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,7 @@ def run_user_story(release_config: dict):
config = yaml.load(handle, Loader=yaml.loader.SafeLoader)
except AttributeError as error:
print(
"Invalid command. Try --help to get more details"
f"error message is {error}"
f"Invalid command. Try --help to get more details. Error message is {error}"
)

run_user_story(config)
2 changes: 1 addition & 1 deletion sacroml/attacks/worst_case_attack.py
Original file line number Diff line number Diff line change
Expand Up @@ -360,7 +360,7 @@ def _get_global_metrics(self, attack_metrics: list) -> dict:
)

global_metrics["null_auc_3sd_range"] = (
f"{0.5 - 3*auc_std:.4f} -> {0.5 + 3*auc_std:.4f}"
f"{0.5 - 3 * auc_std:.4f} -> {0.5 + 3 * auc_std:.4f}"
)
global_metrics["n_sig_auc_p_vals"] = self._get_n_significant(
auc_p_vals, self.p_thresh
Expand Down
11 changes: 5 additions & 6 deletions sacroml/safemodel/reporting.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ def get_reporting_string(**kwargs: dict) -> str:
f"epochs = {inter_params['epochs']}.\n"
),
"basic_params_differ": (
"Warning: basic parameters differ in " f"{inter_params['length']} places:\n"
f"Warning: basic parameters differ in {inter_params['length']} places:\n"
),
"param_changed_from_to": (
f"parameter {inter_params['key']} changed from {inter_params['val']} "
Expand Down Expand Up @@ -200,10 +200,10 @@ def get_reporting_string(**kwargs: dict) -> str:
f"{inter_params['diffs_list']}"
),
"no_dp_gradients_key": (
"optimizer does not contain key _was_dp_gradients_called" " so is not DP."
"optimizer does not contain key _was_dp_gradients_called so is not DP."
),
"found_dp_gradients_key": (
"optimizer does contain key _was_dp_gradients_called" " so should be DP."
"optimizer does contain key _was_dp_gradients_called so should be DP."
),
"changed_opt_no_fit": (
"optimizer has been changed but fit() has not been rerun."
Expand All @@ -229,8 +229,7 @@ def get_reporting_string(**kwargs: dict) -> str:
f"file gave this error message: {inter_params['er']}"
),
"loading_from_unsupported": (
f"loading from a {inter_params['suffix']} "
"file is currently not supported"
f"loading from a {inter_params['suffix']} file is currently not supported"
),
"opt_config_changed": ("Optimizer config has been changed since training."),
"epsilon_above_normal": (
Expand All @@ -239,7 +238,7 @@ def get_reporting_string(**kwargs: dict) -> str:
"Discussion with researcher needed.\n"
),
"recommend_further_discussion": (
f"Recommendation is further discussion needed " f"{inter_params['msg']}.\n"
f"Recommendation is further discussion needed {inter_params['msg']}.\n"
),
"recommend_allow_release": ("Recommendation is to allow release.\n"),
"allow_release_eps_below_max": (
Expand Down
168 changes: 84 additions & 84 deletions tests/attacks/test_structural_attack.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,12 +88,12 @@ def test_unnecessary_risk():
]
for idx, paramdict in enumerate(risky_param_dicts):
model = DecisionTreeClassifier(**paramdict)
errstr = f" unnecessary risk with rule {idx}" f"params are {model.get_params()}"
errstr = f" unnecessary risk with rule {idx}; params are {model.get_params()}"
assert sa.get_unnecessary_risk(model), errstr
model = DecisionTreeClassifier(max_depth=1, min_samples_leaf=150)
assert not sa.get_unnecessary_risk(
model
), f"should be non-disclosive with {model.get_params}"
assert not sa.get_unnecessary_risk(model), (
f"should be non-disclosive with {model.get_params}"
)

# now random forest
risky_param_dicts = [
Expand All @@ -114,12 +114,12 @@ def test_unnecessary_risk():
]
for idx, paramdict in enumerate(risky_param_dicts):
model = RandomForestClassifier(**paramdict)
errstr = f" unnecessary risk with rule {idx}" f"params are {model.get_params()}"
errstr = f" unnecessary risk with rule {idx}; params are {model.get_params()}"
assert sa.get_unnecessary_risk(model), errstr
model = RandomForestClassifier(max_depth=1, n_estimators=25, min_samples_leaf=150)
assert not sa.get_unnecessary_risk(
model
), f"should be non-disclosive with {model.get_params}"
assert not sa.get_unnecessary_risk(model), (
f"should be non-disclosive with {model.get_params}"
)

# finally xgboost
risky_param_dicts = [
Expand All @@ -141,12 +141,12 @@ def test_unnecessary_risk():
]
for idx, paramdict in enumerate(risky_param_dicts):
model = XGBClassifier(**paramdict)
errstr = f" unnecessary risk with rule {idx}" f"params are {model.get_params()}"
errstr = f" unnecessary risk with rule {idx}; params are {model.get_params()}"
assert sa.get_unnecessary_risk(model), errstr
model = XGBClassifier(min_child_weight=10)
assert not sa.get_unnecessary_risk(
model
), f"should be non-disclosive with {model.get_params}"
assert not sa.get_unnecessary_risk(model), (
f"should be non-disclosive with {model.get_params}"
)


def test_non_trees():
Expand All @@ -170,12 +170,12 @@ def test_dt():
myattack = sa.StructuralAttack()
myattack.attack(target)
assert not myattack.dof_risk, "should be no DoF risk with decision stump"
assert (
not myattack.k_anonymity_risk
), "should be no k-anonymity risk with min_samples_leaf 150"
assert (
not myattack.class_disclosure_risk
), "no class disclosure risk for stump with min samples leaf 150"
assert not myattack.k_anonymity_risk, (
"should be no k-anonymity risk with min_samples_leaf 150"
)
assert not myattack.class_disclosure_risk, (
"no class disclosure risk for stump with min samples leaf 150"
)
assert not myattack.unnecessary_risk, "not unnecessary risk if max_depth < 3.5"

# highly disclosive
Expand All @@ -184,15 +184,15 @@ def test_dt():
myattack = sa.StructuralAttack()
myattack.attack(target)
assert not myattack.dof_risk, "should be no DoF risk with decision stump"
assert (
myattack.k_anonymity_risk
), "should be k-anonymity risk with unlimited depth and min_samples_leaf 5"
assert (
myattack.class_disclosure_risk
), "should be class disclosure risk with unlimited depth and min_samples_leaf 5"
assert (
myattack.unnecessary_risk
), " unnecessary risk with unlimited depth and min_samples_leaf 5"
assert myattack.k_anonymity_risk, (
"should be k-anonymity risk with unlimited depth and min_samples_leaf 5"
)
assert myattack.class_disclosure_risk, (
"should be class disclosure risk with unlimited depth and min_samples_leaf 5"
)
assert myattack.unnecessary_risk, (
" unnecessary risk with unlimited depth and min_samples_leaf 5"
)


def test_adaboost():
Expand All @@ -206,9 +206,9 @@ def test_adaboost():
myattack.THRESHOLD = 2
myattack.attack(target)
assert not myattack.dof_risk, "should be no DoF risk with just 2 decision stumps"
assert (
not myattack.k_anonymity_risk
), "should be no k-anonymity risk with only 2 stumps"
assert not myattack.k_anonymity_risk, (
"should be no k-anonymity risk with only 2 stumps"
)
assert not myattack.class_disclosure_risk, "no class disclosure risk for 2 stumps"
assert not myattack.unnecessary_risk, " unnecessary risk not defined for adaboost"

Expand All @@ -222,12 +222,12 @@ def test_adaboost():
myattack2 = sa.StructuralAttack()
myattack2.attack(target)
assert myattack2.dof_risk, "should be DoF risk with adaboost of deep trees"
assert (
myattack2.k_anonymity_risk
), "should be k-anonymity risk with adaboost unlimited depth and min_samples_leaf 2"
assert (
myattack2.class_disclosure_risk
), "should be class risk with adaboost unlimited depth and min_samples_leaf 2"
assert myattack2.k_anonymity_risk, (
"should be k-anonymity risk with adaboost unlimited depth and min_samples_leaf 2"
)
assert myattack2.class_disclosure_risk, (
"should be class risk with adaboost unlimited depth and min_samples_leaf 2"
)
assert not myattack2.unnecessary_risk, " unnecessary risk not defined for adaboost"


Expand All @@ -238,15 +238,15 @@ def test_rf():
target = get_target("rf", **param_dict)
myattack = sa.StructuralAttack()
myattack.attack(target)
assert (
not myattack.dof_risk
), "should be no DoF risk with small forest of decision stumps"
assert (
not myattack.k_anonymity_risk
), "should be no k-anonymity risk with min_samples_leaf 150"
assert (
not myattack.class_disclosure_risk
), "no class disclosure risk for stumps with min samples leaf 150"
assert not myattack.dof_risk, (
"should be no DoF risk with small forest of decision stumps"
)
assert not myattack.k_anonymity_risk, (
"should be no k-anonymity risk with min_samples_leaf 150"
)
assert not myattack.class_disclosure_risk, (
"no class disclosure risk for stumps with min samples leaf 150"
)
assert not myattack.unnecessary_risk, "not unnecessary risk if max_depth < 3.5"

# highly disclosive
Expand All @@ -260,15 +260,15 @@ def test_rf():
myattack = sa.StructuralAttack()
myattack.attack(target)
assert myattack.dof_risk, "should be DoF risk with forest of deep trees"
assert (
myattack.k_anonymity_risk
), "should be k-anonymity risk with unlimited depth and min_samples_leaf 5"
assert (
myattack.class_disclosure_risk
), "should be class disclsoure risk with unlimited depth and min_samples_leaf 5"
assert (
myattack.unnecessary_risk
), " unnecessary risk with unlimited depth and min_samples_leaf 5"
assert myattack.k_anonymity_risk, (
"should be k-anonymity risk with unlimited depth and min_samples_leaf 5"
)
assert myattack.class_disclosure_risk, (
"should be class disclosure risk with unlimited depth and min_samples_leaf 5"
)
assert myattack.unnecessary_risk, (
" unnecessary risk with unlimited depth and min_samples_leaf 5"
)


def test_xgb():
Expand All @@ -278,15 +278,15 @@ def test_xgb():
target = get_target("xgb", **param_dict)
myattack = sa.StructuralAttack()
myattack.attack(target)
assert (
not myattack.dof_risk
), "should be no DoF risk with small xgb of decision stumps"
assert (
not myattack.k_anonymity_risk
), "should be no k-anonymity risk with min_samples_leaf 150"
assert (
not myattack.class_disclosure_risk
), "no class disclosure risk for stumps with min child weight 50"
assert not myattack.dof_risk, (
"should be no DoF risk with small xgb of decision stumps"
)
assert not myattack.k_anonymity_risk, (
"should be no k-anonymity risk with min_samples_leaf 150"
)
assert not myattack.class_disclosure_risk, (
"no class disclosure risk for stumps with min child weight 50"
)
assert myattack.unnecessary_risk == 0, "not unnecessary risk if max_depth < 3.5"

# highly disclosive
Expand All @@ -295,12 +295,12 @@ def test_xgb():
myattack2 = sa.StructuralAttack()
myattack2.attack(target2)
assert myattack2.dof_risk, "should be DoF risk with xgb of deep trees"
assert (
myattack2.k_anonymity_risk
), "should be k-anonymity risk with depth 50 and min_child_weight 1"
assert (
myattack2.class_disclosure_risk
), "should be class disclosure risk with xgb lots of deep trees"
assert myattack2.k_anonymity_risk, (
"should be k-anonymity risk with depth 50 and min_child_weight 1"
)
assert myattack2.class_disclosure_risk, (
"should be class disclosure risk with xgb lots of deep trees"
)
assert myattack2.unnecessary_risk, " unnecessary risk with these xgb params"


Expand All @@ -319,15 +319,15 @@ def test_sklearnmlp():
paramstr = ""
for key, val in safeparams.items():
paramstr += f"{key}:{val}\n"
assert (
not myattack.dof_risk
), f"should be no DoF risk with small mlp with params {paramstr}"
assert (
not myattack.k_anonymity_risk
), f"should be no k-anonymity risk with params {paramstr}"
assert (
myattack.class_disclosure_risk
), f"should be class disclosure risk with params {paramstr}"
assert not myattack.dof_risk, (
f"should be no DoF risk with small mlp with params {paramstr}"
)
assert not myattack.k_anonymity_risk, (
f"should be no k-anonymity risk with params {paramstr}"
)
assert myattack.class_disclosure_risk, (
f"should be class disclosure risk with params {paramstr}"
)
assert not myattack.unnecessary_risk, "not unnecessary risk for mlps at present"

# highly disclosive
Expand All @@ -344,12 +344,12 @@ def test_sklearnmlp():
myattack2 = sa.StructuralAttack()
myattack2.attack(target2)
assert myattack2.dof_risk, f"should be DoF risk with this MLP:\n{uparamstr}"
assert (
myattack2.k_anonymity_risk
), "559/560 records should be k-anonymity risk with this MLP:\n{uparamstr}"
assert (
myattack2.class_disclosure_risk
), "should be class disclosure risk with this MLP:\n{uparamstr}"
assert myattack2.k_anonymity_risk, (
"559/560 records should be k-anonymity risk with this MLP:\n{uparamstr}"
)
assert myattack2.class_disclosure_risk, (
"should be class disclosure risk with this MLP:\n{uparamstr}"
)
assert not myattack2.unnecessary_risk, "no unnecessary risk yet for MLPClassifiers"


Expand Down
Loading
Loading