From c72fb89c07600f78b13f152ddb09bb715ace37ab Mon Sep 17 00:00:00 2001
From: LakshmiKalaKadali <149650845+LakshmiKalaKadali@users.noreply.github.com>
Date: Thu, 28 Nov 2024 14:39:44 +0530
Subject: [PATCH] Typos fixed in instance_segmentation.ipynb

---
 docs/vision/instance_segmentation.ipynb | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/vision/instance_segmentation.ipynb b/docs/vision/instance_segmentation.ipynb
index 8e2ed871794..1f8f21eddab 100644
--- a/docs/vision/instance_segmentation.ipynb
+++ b/docs/vision/instance_segmentation.ipynb
@@ -479,9 +479,9 @@
     "\n",
     "Use the `retinanet_mobilenet_coco` experiment configuration, as defined by `tfm.vision.configs.maskrcnn.maskrcnn_mobilenet_coco`.\n",
     "\n",
-    "Please find all the registered experiements [here](https://www.tensorflow.org/api_docs/python/tfm/core/exp_factory/get_exp_config)\n",
+    "Please find all the registered experiments [here](https://www.tensorflow.org/api_docs/python/tfm/core/exp_factory/get_exp_config)\n",
     "\n",
-    "The configuration defines an experiment to train a Mask R-CNN model with mobilenet as backbone and FPN as decoder. Default Congiguration is trained on [COCO](https://cocodataset.org/) train2017 and evaluated on [COCO](https://cocodataset.org/) val2017.\n",
+    "The configuration defines an experiment to train a Mask R-CNN model with mobilenet as backbone and FPN as decoder. Default Configuration is trained on [COCO](https://cocodataset.org/) train2017 and evaluated on [COCO](https://cocodataset.org/) val2017.\n",
     "\n",
     "There are also other alternative experiments available such as\n",
     "`maskrcnn_resnetfpn_coco`,\n",
@@ -713,7 +713,7 @@
    "id": "dLcSHWjqgl66"
   },
   "source": [
-    "### Create Category Index Dictionary to map the labels to coressponding label names"
+    "### Create Category Index Dictionary to map the labels to corresponding label names"
   ]
  },
  {
@@ -734,7 +734,7 @@
   },
   "source": [
    "### Helper Function for Visualizing the results from TFRecords\n",
-    "Use `visualize_boxes_and_labels_on_image_array` from `visualization_utils` to draw boudning boxes on the image."
+    "Use `visualize_boxes_and_labels_on_image_array` from `visualization_utils` to draw bounding boxes on the image."
   ]
  },
  {
@@ -786,7 +786,7 @@
    " 2. Percentage of match between predicted and ground truth bounding boxes.\n",
    " 3. Instance Segmentation Mask\n",
    "\n",
-    "**Note**: The reason of everything is 100% is because we are visualising the groundtruth"
+    "**Note**: The reason everything is 100% is because we are visualizing the groundtruth"
   ]
  },
  {