diff --git a/develop/_modules/openvino_xai/explainer/explanation.html b/develop/_modules/openvino_xai/explainer/explanation.html
index 278d1f4e..3e9f5781 100644
--- a/develop/_modules/openvino_xai/explainer/explanation.html
+++ b/develop/_modules/openvino_xai/explainer/explanation.html
@@ -535,21 +535,58 @@
Dumps saliency map.
+save(dir_path: Path | str, prefix: str = '', postfix: str = '', confidence_scores: Dict[int, float] | None = None) → None [source]#
+Dumps saliency map images to the specified directory.
+Allows flexible naming of the files with a prefix and postfix.
+{prefix} + target_id + {postfix}.jpg
+Also allows adding confidence scores to the file names.
+{prefix} + target_id + {postfix} + confidence.jpg
+save(output_dir) -> aeroplane.jpg
+save(output_dir, prefix="image_name_target_") -> image_name_target_aeroplane.jpg
+save(output_dir, postfix="_class_map") -> aeroplane_class_map.jpg
+save(
+    output_dir, prefix="image_name_", postfix="_conf_", confidence_scores=scores
+) -> image_name_aeroplane_conf_0.85.jpg
+dir_path (Path | str) – The directory path where the saliency maps will be saved.
+prefix (str) – Optional prefix for the saliency map names. Default is an empty string.
+postfix (str) – Optional postfix for the saliency map names. Default is an empty string.
+confidence_scores (Dict[int, float] | None) – Dict with confidence scores for each class index. Default is None.
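For instance, given an Explanation object, a call that combines all four arguments might look like the sketch below (the directory name, class indices, and score values are placeholder assumptions):

# Hypothetical confidence scores for the explained class indices.
scores = {0: 0.92, 11: 0.85}
explanation.save("saliency_maps", prefix="image_name_", postfix="_conf_", confidence_scores=scores)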
+XAI algorithms.
Bases: BlackBoxXAIMethod
AISE explains classification models in black-box mode using AISE: Adaptive Input Sampling for Explanation of Black-box Models.
@@ -726,7 +767,7 @@
model (ov.Model) – OpenVINO model.
-postprocess_fn (Callable[[OVDict], np.ndarray]) – Post-processing function that extracts scores from IR model output.
+postprocess_fn (Callable[[Mapping], np.ndarray]) – Post-processing function that extracts scores from IR model output.
preprocess_fn (Callable[[np.ndarray], np.ndarray]) – Pre-processing function, identity function by default (assume input images are already preprocessed by user).
device_name (str) – Device type name.
model (ov.Model) – OpenVINO model.
-postprocess_fn (Callable[[OVDict], np.ndarray]) – Post-processing function that extracts scores from IR model output.
+postprocess_fn (Callable[[Mapping], np.ndarray]) – Post-processing function that extracts scores from IR model output.
preprocess_fn (Callable[[np.ndarray], np.ndarray]) – Pre-processing function, identity function by default (assume input images are already preprocessed by user).
device_name (str) – Device type name.
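A rough sketch of how these pieces fit together when AISE runs through the Explainer in black-box mode; the Explainer, Task, and ExplainMode names follow their usage elsewhere on this page, while the model path, the output indexing in postprocess_fn, and the input size are assumptions:

import cv2
import numpy as np
import openvino.runtime as ov
from typing import Mapping

import openvino_xai as xai
from openvino_xai.explainer import ExplainMode

def preprocess_fn(image: np.ndarray) -> np.ndarray:
    """Resize, move channels first, and add a batch dimension."""
    x = cv2.resize(src=image, dsize=(224, 224)).transpose((2, 0, 1))
    return np.expand_dims(x, 0)

def postprocess_fn(output: Mapping) -> np.ndarray:
    """Extract per-class scores from the raw inference result (the output key is model-specific)."""
    return np.array(output[0])

model = ov.Core().read_model("path/to/model.xml")
explainer = xai.Explainer(
    model,
    task=xai.Task.CLASSIFICATION,
    preprocess_fn=preprocess_fn,
    postprocess_fn=postprocess_fn,  # black-box mode needs scores for the sampled inputs
)
explanation = explainer(
    cv2.imread("path/to/image.jpg"),
    explain_mode=ExplainMode.BLACKBOX,
)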
Here’s an example of how we can avoid passing preprocess_fn by preprocessing the data beforehand (e.g., resizing and adding a batch dimension).
import cv2
import numpy as np
+from typing import Mapping
import openvino.runtime as ov
-from from typing import Mapping
import openvino_xai as xai
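A condensed sketch of that flow under the imports above: preprocess the image yourself and create the Explainer without a preprocess_fn (the paths and the target class index are placeholder assumptions):

def postprocess_fn(output: Mapping) -> np.ndarray:
    """Extract scores from the raw model output (the output key is model-specific)."""
    return np.array(output[0])

model = ov.Core().read_model("path/to/model.xml")

# Preprocess the data beforehand: resize, channels-first layout, batch dimension.
image = cv2.imread("path/to/image.jpg")
processed_image = np.expand_dims(cv2.resize(image, (224, 224)).transpose((2, 0, 1)), 0)

# No preprocess_fn is passed, since the input is already preprocessed.
explainer = xai.Explainer(model, task=xai.Task.CLASSIFICATION, postprocess_fn=postprocess_fn)
explanation = explainer(processed_image, target_explain_labels=[11])  # hypothetical class index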
@@ -539,7 +541,7 @@ Running without )
# Save saliency maps
-explanation.save("output_path", "name")
+explanation.save("output_path", "name_")
@@ -547,8 +549,8 @@ Running without Specifying preprocess_fn
#
import cv2
import numpy as np
-import openvino.runtime as ov
from typing import Mapping
+import openvino.runtime as ov
import openvino_xai as xai
@@ -585,7 +587,7 @@ Specifying prep
)
# Save saliency maps
-explanation.save("output_path", "name")
+explanation.save("output_path", "name_")
@@ -640,7 +642,7 @@ White-Box mode)
# Save saliency maps
-explanation.save("output_path", "name")
+explanation.save("output_path", "name_")
import cv2
import numpy as np
import openvino.runtime as ov
+
import openvino_xai as xai
+from openvino_xai.explainer import ExplainMode
def preprocess_fn(image: np.ndarray) -> np.ndarray:
"""Preprocess the input image."""
@@ -780,6 +784,92 @@ Plot saliency maps
+Saving saliency maps#
+You can easily save saliency maps with flexible naming options by using a prefix and postfix. The prefix allows saliency maps from the same image to have consistent naming.
+The format for naming is:
+{prefix} + target_id + {postfix}.jpg
+Additionally, you can include the confidence score for each class in the saved saliency map’s name.
+{prefix} + target_id + {postfix} + confidence.jpg
+import cv2
+import numpy as np
+import openvino.runtime as ov
+from typing import Mapping
+
+import openvino_xai as xai
+from openvino_xai.explainer import ExplainMode
+
+def preprocess_fn(image: np.ndarray) -> np.ndarray:
+ """Preprocess the input image."""
+ x = cv2.resize(src=image, dsize=(224, 224))
+ x = x.transpose((2, 0, 1))
+ processed_image = np.expand_dims(x, 0)
+ return processed_image
+
+def postprocess_fn(output: Mapping):
+ """Postprocess the model output."""
+ output = softmax(output)
+ return output[0]
+
+def softmax(x: np.ndarray) -> np.ndarray:
+ """Compute softmax values of x."""
+ e_x = np.exp(x - np.max(x))
+ return e_x / e_x.sum()
+
+# Generate and process saliency maps (as many as required, sequentially)
+image = cv2.imread("path/to/image.jpg")
+
+# Create ov.Model
+MODEL_PATH = "path/to/model.xml"
+core = ov.Core()
+model = core.read_model(MODEL_PATH) # type: ov.Model
+
+# The Explainer object will prepare and load the model once in the beginning
+explainer = xai.Explainer(
+ model,
+ task=xai.Task.CLASSIFICATION,
+ preprocess_fn=preprocess_fn,
+)
+
+voc_labels = [
+ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
+ 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
+]
+
+# Get predicted confidences for the image
+compiled_model = core.compile_model(model=model, device_name="AUTO")
+logits = compiled_model(preprocess_fn(image))[0]
+result_infer = postprocess_fn(logits)
+
+# Generate list of predicted class indices and scores
+result_idxs = np.argwhere(result_infer > 0.4).flatten()
+result_scores = result_infer[result_idxs]
+
+# Generate dict {class_index: confidence} to save saliency maps
+scores_dict = {i: score for i, score in zip(result_idxs, result_scores)}
+
+# Run explanation
+explanation = explainer(
+ image,
+ explain_mode=ExplainMode.WHITEBOX,
+ label_names=voc_labels,
+ target_explain_labels=result_idxs, # target classes to explain
+)
+
+# Save saliency maps flexibly
+OUTPUT_PATH = "output_path"
+explanation.save(OUTPUT_PATH) # aeroplane.jpg
+explanation.save(OUTPUT_PATH, "image_name_target_") # image_name_target_aeroplane.jpg
+explanation.save(OUTPUT_PATH, prefix="image_name_target_") # image_name_target_aeroplane.jpg
+explanation.save(OUTPUT_PATH, postfix="_class_map") # aeroplane_class_map.jpg
+explanation.save(OUTPUT_PATH, prefix="image_name_", postfix="_class_map") # image_name_aeroplane_class_map.jpg
+
+# Save saliency maps with confidence scores
+explanation.save(
+ OUTPUT_PATH, prefix="image_name_", postfix="_conf_", confidence_scores=scores_dict
+) # image_name_aeroplane_conf_0.85.jpg
+
+
+
Example scripts#
More usage scenarios that can be used with your own models and images as arguments are available in examples.
@@ -858,6 +948,7 @@ Example scriptsBlack-Box mode
XAI insertion (white-box usage)
Plot saliency maps
+Saving saliency maps
Example scripts