Update workflows for TensorFlow/Keras 2.14
Signed-off-by: Beat Buesser <[email protected]>
beat-buesser committed Oct 30, 2023
1 parent d2f2592 commit 9614bb6
Showing 5 changed files with 25 additions and 24 deletions.
3 changes: 2 additions & 1 deletion art/attacks/poisoning/poisoning_attack_svm.py
@@ -49,7 +49,8 @@ class PoisoningAttackSVM(PoisoningAttackWhiteBox):
         "y_train",
         "x_val",
         "y_val",
-        "max_iter" "verbose",
+        "max_iter",
+        "verbose",
     ]
     _estimator_requirements = (ScikitlearnSVC,)

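Note on the fix above: Python concatenates adjacent string literals at compile time, so the missing comma silently merged the two intended attack parameters into a single "max_iterverbose" entry. A minimal standalone sketch of the before/after behaviour (illustrative only, not taken from the ART code base):

    # Adjacent string literals are joined implicitly, so a missing comma
    # collapses two parameter names into one list entry.
    params_buggy = ["x_val", "y_val", "max_iter" "verbose"]
    params_fixed = ["x_val", "y_val", "max_iter", "verbose"]

    assert params_buggy == ["x_val", "y_val", "max_iterverbose"]      # one merged name
    assert params_fixed == ["x_val", "y_val", "max_iter", "verbose"]  # two names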
26 changes: 13 additions & 13 deletions art/defences/detector/poison/clustering_analyzer.py
@@ -227,12 +227,12 @@ def analyze_by_relative_size(
             if np.size(sizes) > 2:
                 raise ValueError(" RelativeSizeAnalyzer does not support more than two clusters.")
             percentages = np.round(sizes / float(np.sum(sizes)), r_size)
-            poison_clusters = np.where(percentages < size_threshold)[0]
-            clean_clusters = np.where(percentages >= size_threshold)[0]
+            poison_clusters = np.where(percentages < size_threshold)
+            clean_clusters = np.where(percentages >= size_threshold)

-            for p_id in poison_clusters:
+            for p_id in poison_clusters[0]:
                 summary_poison_clusters[i][p_id] = 1
-            for c_id in clean_clusters:
+            for c_id in clean_clusters[0]:
                 summary_poison_clusters[i][c_id] = 0

             assigned_clean = self.assign_class(clusters, clean_clusters, poison_clusters)
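Context for the hunk above: np.where called with only a condition returns a tuple of index arrays (one per dimension), not a flat array. The updated code keeps that tuple and takes [0] only when iterating, so the loop variables remain cluster indices. A standalone illustration (not ART code), assuming a two-cluster percentages array:

    import numpy as np

    percentages = np.array([0.9, 0.1])   # relative cluster sizes
    size_threshold = 0.35

    poison_clusters = np.where(percentages < size_threshold)
    print(poison_clusters)            # (array([1]),) -- a one-element tuple
    for p_id in poison_clusters[0]:   # iterate the index array, as the new code does
        print("suspicious cluster:", p_id)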
@@ -251,7 +251,7 @@ def analyze_by_relative_size(
             report["Class_" + str(i)] = report_class

         report["suspicious_clusters"] = report["suspicious_clusters"] + np.sum(summary_poison_clusters).item()
-        return np.asarray(all_assigned_clean), summary_poison_clusters, report
+        return np.asarray(all_assigned_clean, dtype=object), summary_poison_clusters, report

     def analyze_by_silhouette_score(
         self,
@@ -309,8 +309,8 @@ def analyze_by_silhouette_score(
             if np.size(bins) > 2:
                 raise ValueError("Analyzer does not support more than two clusters.")
             percentages = np.round(bins / float(np.sum(bins)), r_size)
-            poison_clusters = np.where(percentages < size_threshold)[0]
-            clean_clusters = np.where(percentages >= size_threshold)[0]
+            poison_clusters = np.where(percentages < size_threshold)
+            clean_clusters = np.where(percentages >= size_threshold)

             # Generate report for class
             silhouette_avg = round(silhouette_score(activations, clusters), r_silhouette)
@@ -324,26 +324,26 @@ def analyze_by_silhouette_score(
                 # Relative size of the clusters is suspicious
                 if silhouette_avg > silhouette_threshold:
                     # In this case the cluster is considered poisonous
-                    clean_clusters = np.where(percentages < size_threshold)[0]
+                    clean_clusters = np.where(percentages < size_threshold)
                     logger.info("computed silhouette score: %s", silhouette_avg)
                     dict_i.update(suspicious=True)
                 else:
-                    poison_clusters = np.array([[]])
-                    clean_clusters = np.where(percentages >= 0)[0]
+                    poison_clusters = [[]]
+                    clean_clusters = np.where(percentages >= 0)
                     dict_i.update(suspicious=False)
             else:
                 # If relative size of the clusters is Not suspicious, we conclude it's not suspicious.
                 dict_i.update(suspicious=False)

             report_class: Dict[str, Dict[str, bool]] = {"class_" + str(i): dict_i}

-            for p_id in poison_clusters:
+            for p_id in poison_clusters[0]:
                 summary_poison_clusters[i][p_id] = 1
-            for c_id in clean_clusters:
+            for c_id in clean_clusters[0]:
                 summary_poison_clusters[i][c_id] = 0

             assigned_clean = self.assign_class(clusters, clean_clusters, poison_clusters)
             all_assigned_clean.append(assigned_clean)
             report.update(report_class)

-        return np.asarray(all_assigned_clean), summary_poison_clusters, report
+        return np.asarray(all_assigned_clean, dtype=object), summary_poison_clusters, report
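The dtype=object added to both return statements matters because all_assigned_clean holds one array per class, and those arrays can have different lengths. NumPy 1.24 and later raises a ValueError when asked to build a regular ndarray from such ragged input, so an object array is requested explicitly. A standalone sketch under that assumption (two classes with unequal sample counts):

    import numpy as np

    # Per-class results with different lengths ("ragged" input).
    all_assigned_clean = [np.array([1, 0, 1]), np.array([0, 1])]

    # np.asarray(all_assigned_clean) would raise a ValueError on NumPy >= 1.24;
    # requesting an object array keeps one entry per class instead.
    assigned = np.asarray(all_assigned_clean, dtype=object)
    print(assigned.shape)   # (2,)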
2 changes: 1 addition & 1 deletion tests/defences/detector/poison/test_activation_defence.py
@@ -118,7 +118,7 @@ def test_output_clusters(self):
         clusters_by_class, _ = self.defence.cluster_activations(nb_clusters=nb_clusters)

         # Verify expected number of classes
-        self.assertEqual(np.shape(clusters_by_class)[0], n_classes)
+        self.assertEqual(len(clusters_by_class), n_classes)
         # Check we get the expected number of clusters:
         found_clusters = len(np.unique(clusters_by_class[0]))
         self.assertEqual(found_clusters, nb_clusters)
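The test change above follows the same theme: once the per-class clusters may be ragged, np.shape(clusters_by_class) would first have to convert the input to an ndarray, while len() simply counts the top-level entries and works for plain lists and object arrays alike. A short standalone check (not ART code):

    import numpy as np

    clusters_by_class = [np.array([0, 1, 1]), np.array([1, 0])]   # 2 classes, ragged
    assert len(clusters_by_class) == 2
    assert len(np.asarray(clusters_by_class, dtype=object)) == 2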
16 changes: 8 additions & 8 deletions tests/defences/detector/poison/test_ground_truth_evaluator.py
@@ -64,9 +64,9 @@ def test_analyze_correct_all_clean(self):
         # print(json_object)
         for i in range(self.n_classes):
             res_class_i = json_object["class_" + str(i)]
-            self.assertEqual(res_class_i["TruePositive"]["rate"], "N/A")
+            self.assertEqual(res_class_i["TruePositive"]["rate"], -1)
             self.assertEqual(res_class_i["TrueNegative"]["rate"], 100)
-            self.assertEqual(res_class_i["FalseNegative"]["rate"], "N/A")
+            self.assertEqual(res_class_i["FalseNegative"]["rate"], -1)
             self.assertEqual(res_class_i["FalsePositive"]["rate"], 0)

             self.assertEqual(res_class_i["TruePositive"]["numerator"], 0)
@@ -99,9 +99,9 @@ def test_analyze_correct_all_poison(self):
         for i in range(self.n_classes):
             res_class_i = json_object["class_" + str(i)]
             self.assertEqual(res_class_i["TruePositive"]["rate"], 100)
-            self.assertEqual(res_class_i["TrueNegative"]["rate"], "N/A")
+            self.assertEqual(res_class_i["TrueNegative"]["rate"], -1)
             self.assertEqual(res_class_i["FalseNegative"]["rate"], 0)
-            self.assertEqual(res_class_i["FalsePositive"]["rate"], "N/A")
+            self.assertEqual(res_class_i["FalsePositive"]["rate"], -1)

             self.assertEqual(res_class_i["TruePositive"]["numerator"], self.n_dp)
             self.assertEqual(res_class_i["TruePositive"]["denominator"], self.n_dp)
@@ -166,9 +166,9 @@ def test_analyze_fully_misclassified(self):
         for i in range(self.n_classes):
             res_class_i = json_object["class_" + str(i)]
             self.assertEqual(res_class_i["TruePositive"]["rate"], 0)
-            self.assertEqual(res_class_i["TrueNegative"]["rate"], "N/A")
+            self.assertEqual(res_class_i["TrueNegative"]["rate"], -1)
             self.assertEqual(res_class_i["FalseNegative"]["rate"], 100)
-            self.assertEqual(res_class_i["FalsePositive"]["rate"], "N/A")
+            self.assertEqual(res_class_i["FalsePositive"]["rate"], -1)

             self.assertEqual(res_class_i["TruePositive"]["numerator"], 0)
             self.assertEqual(res_class_i["TruePositive"]["denominator"], self.n_dp)
@@ -200,9 +200,9 @@ def test_analyze_fully_misclassified_rev(self):
         pprint.pprint(json_object)
         for i in range(self.n_classes):
             res_class_i = json_object["class_" + str(i)]
-            self.assertEqual(res_class_i["TruePositive"]["rate"], "N/A")
+            self.assertEqual(res_class_i["TruePositive"]["rate"], -1)
             self.assertEqual(res_class_i["TrueNegative"]["rate"], 0)
-            self.assertEqual(res_class_i["FalseNegative"]["rate"], "N/A")
+            self.assertEqual(res_class_i["FalseNegative"]["rate"], -1)
             self.assertEqual(res_class_i["FalsePositive"]["rate"], 100)

             self.assertEqual(res_class_i["TruePositive"]["numerator"], 0)
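The updated assertions suggest a change of convention in the GroundTruthEvaluator report: a rate whose denominator is zero (for example the true-positive rate when a class contains no poison at all) is now reported as the numeric sentinel -1 instead of the string "N/A". The helper below is hypothetical, not part of the ART API; it only illustrates the convention the tests now expect:

    def safe_rate(numerator: int, denominator: int) -> float:
        # Hypothetical sketch: undefined rates become -1 rather than "N/A".
        if denominator == 0:
            return -1
        return round(100.0 * numerator / denominator, 2)

    assert safe_rate(0, 0) == -1      # previously reported as "N/A"
    assert safe_rate(5, 10) == 50.0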
2 changes: 1 addition & 1 deletion
@@ -154,7 +154,7 @@ def test_loss_functions(
         art_warning(e)


-@pytest.mark.skip_framework("non_dl_frameworks", "huggingface", "tensorflow2")
+@pytest.mark.skip_framework("non_dl_frameworks", "huggingface", "tensorflow2", "kerastf")
 def test_pickle(art_warning, image_dl_estimator, image_dl_estimator_defended, tmp_path):
     try:
         full_path = os.path.join(tmp_path, "my_classifier.p")
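The last change adds "kerastf" to the frameworks for which test_pickle is skipped, presumably because pickling this defended estimator is not supported under the Keras/TensorFlow 2.14 setup. skip_framework is a custom marker defined in ART's own test infrastructure; the conftest.py sketch below only shows, in generic terms, how such a marker-driven skip can be wired, and is not ART's actual implementation:

    # conftest.py (illustrative only)
    import pytest

    def pytest_addoption(parser):
        # assumed command-line option selecting the backend under test
        parser.addoption("--framework", action="store", default="tensorflow2")

    @pytest.fixture(autouse=True)
    def skip_by_framework(request):
        marker = request.node.get_closest_marker("skip_framework")
        if marker is None:
            return
        framework = request.config.getoption("--framework")
        if framework in marker.args:
            pytest.skip(f"test not supported on framework: {framework}")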
