From 1018fa84afbab936608e4ed421951aef5ea382be Mon Sep 17 00:00:00 2001
From: Ultralytics Assistant
<135830346+UltralyticsAssistant@users.noreply.github.com>
Date: Fri, 6 Sep 2024 00:16:06 +0800
Subject: [PATCH] Merge Ultralytics Code Refactor
Automated merge of the Ultralytics code refactor: modernizes printf-style "%" formatting to f-strings (one multi-line case to str.format), adopts Python 3 idioms (zero-argument super(), implicit object base classes, default "r" mode for open()), simplifies NumPy shape arguments, and refreshes README links.
---
README.md | 8 ++++----
detect.py | 14 +++++++-------
models.py | 8 ++++----
scoring/matching.py | 2 +-
scoring/rectangle.py | 2 +-
scoring/score.py | 14 ++++++++------
train.py | 6 +++---
utils/datasets.py | 4 ++--
utils/utils.py | 12 ++++++------
utils/utils_xview.py | 2 +-
10 files changed, 37 insertions(+), 35 deletions(-)
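Note: the dominant change below swaps printf-style "%" formatting for f-strings. The ":g" format spec in an f-string applies the same general-format rule as the old "%g", so output is unchanged. A minimal standalone sketch of the equivalence (illustrative values, not code from this patch):

    i, ni = 3, 8
    old = "row %g/%g: " % (i, ni)   # printf-style
    new = f"row {i:g}/{ni:g}: "     # f-string equivalent
    assert old == new == "row 3/8: "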
diff --git a/README.md b/README.md
index cd87db3..389e17f 100755
--- a/README.md
+++ b/README.md
@@ -93,7 +93,7 @@ If you use this repository or the associated tools and datasets in your research
# 👥 Contribute
-🤝 We love contributions from the community! Our open-source projects thrive on your help. To start contributing, please check out our [Contributing Guide](https://docs.ultralytics.com/help/contributing). Additionally, we'd love to hear from you through our [Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey). It's a way to **impact** the future of our projects. A big shoutout and thank you 🙏 to all our contributors!
+🤝 We love contributions from the community! Our open-source projects thrive on your help. To start contributing, please check out our [Contributing Guide](https://docs.ultralytics.com/help/contributing). Additionally, we'd love to hear from you through our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey). It's a way to **impact** the future of our projects. A big shoutout and thank you 🙏 to all our contributors!
@@ -104,12 +104,12 @@ If you use this repository or the associated tools and datasets in your research
At Ultralytics, we provide two different licensing options to suit various use cases:
-- **AGPL-3.0 License**: The [AGPL-3.0 License](https://www.gnu.org/licenses/agpl-3.0.html) is an [OSI-approved](https://opensource.org/licenses/) open-source format that's best suited for students, researchers, and enthusiasts to promote collaboration and knowledge sharing. The full terms can be found in the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file.
-- **Enterprise License**: If you're looking for a commercial application of our software and models, the Enterprise License enables integration into commercial products while bypassing the open-source stipulations of the AGPL-3.0. For embedding our solutions into your commercial offerings, please contact us through [Ultralytics Licensing](https://ultralytics.com/license).
+- **AGPL-3.0 License**: The [AGPL-3.0 License](https://www.gnu.org/licenses/agpl-3.0.html) is an [OSI-approved](https://opensource.org/license) open-source format that's best suited for students, researchers, and enthusiasts to promote collaboration and knowledge sharing. The full terms can be found in the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file.
+- **Enterprise License**: If you're looking for a commercial application of our software and models, the Enterprise License enables integration into commercial products while bypassing the open-source stipulations of the AGPL-3.0. For embedding our solutions into your commercial offerings, please contact us through [Ultralytics Licensing](https://www.ultralytics.com/license).
# 📬 Contact
-🐞 For reporting bugs or suggesting new features, please open an issue on our [GitHub Issues](https://github.com/ultralytics/xview-yolov3/issues) page. And if you have questions or fancy engaging with us, join our vibrant [Discord](https://ultralytics.com/discord) community!
+🐞 For reporting bugs or suggesting new features, please open an issue on our [GitHub Issues](https://github.com/ultralytics/xview-yolov3/issues) page. And if you have questions or fancy engaging with us, join our vibrant [Discord](https://discord.com/invite/ultralytics) community!
diff --git a/detect.py b/detect.py
index 72387b4..8934d8b 100755
--- a/detect.py
+++ b/detect.py
@@ -100,10 +100,10 @@ def detect(opt):
ni = int(math.ceil(img.shape[1] / length)) # up-down
nj = int(math.ceil(img.shape[2] / length)) # left-right
for i in range(ni): # for i in range(ni - 1):
- print("row %g/%g: " % (i, ni), end="")
+ print(f"row {i:g}/{ni:g}: ", end="")
for j in range(nj): # for j in range(nj if i==0 else nj - 1):
- print("%g " % j, end="", flush=True)
+ print(f"{j:g} ", end="", flush=True)
# forward scan
y2 = min((i + 1) * length, img.shape[1])
@@ -166,7 +166,7 @@ def detect(opt):
# Iterate through images and save plot of detections
for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):
- print("image %g: '%s'" % (img_i, path))
+ print(f"image {img_i:g}: '{path}'")
if opt.plot_flag:
img = cv2.imread(path)
@@ -192,7 +192,7 @@ def detect(opt):
with open(results_path.replace(".bmp", ".tif") + ".txt", "a") as file:
for i in unique_classes:
n = (detections[:, -1].cpu() == i).sum()
- print("%g %ss" % (n, classes[int(i)]))
+ print(f"{n:g} {classes[int(i)]}s")
for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
# Rescale coordinates to original dimensions
@@ -207,11 +207,11 @@ def detect(opt):
# write to file
xvc = xview_indices2classes(int(cls_pred)) # xview class
# if (xvc != 21) & (xvc != 72):
- file.write(("%g %g %g %g %g %g \n") % (x1, y1, x2, y2, xvc, cls_conf * conf))
+ file.write(f"{x1:g} {y1:g} {x2:g} {y2:g} {xvc:g} {cls_conf * conf:g} \n")
if opt.plot_flag:
# Add the bbox to the plot
- label = "%s %.2f" % (classes[int(cls_pred)], cls_conf) if cls_conf > 0.05 else None
+ label = f"{classes[int(cls_pred)]} {cls_conf:.2f}" if cls_conf > 0.05 else None
color = bbox_colors[int(np.where(unique_classes == int(cls_pred))[0])]
plot_one_box([x1, y1, x2, y2], img, label=label, color=color, line_thickness=1)
@@ -236,7 +236,7 @@ def __init__(self, num_classes=60):
"""Initializes a ConvNetb model with configurable number of classes, defaulting to 60, and a series of
convolutional layers.
"""
- super(ConvNetb, self).__init__()
+ super().__init__()
n = 64 # initial convolution size
self.layer1 = nn.Sequential(
nn.Conv2d(3, n, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(n), nn.LeakyReLU()
diff --git a/models.py b/models.py
index f80ef08..42567ba 100755
--- a/models.py
+++ b/models.py
@@ -73,7 +73,7 @@ class EmptyLayer(nn.Module):
def __init__(self):
"""Initializes a placeholder layer for 'route' and 'shortcut' in YOLO architecture."""
- super(EmptyLayer, self).__init__()
+ super().__init__()
class YOLOLayer(nn.Module):
@@ -81,7 +81,7 @@ class YOLOLayer(nn.Module):
def __init__(self, anchors, nC, img_dim, anchor_idxs):
"""Initializes YOLO layer with given anchors, number of classes, image dimensions, and anchor indexes."""
- super(YOLOLayer, self).__init__()
+ super().__init__()
anchors = list(anchors)
nA = len(anchors)
@@ -232,7 +232,7 @@ def __init__(self, config_path, img_size=416):
"""Initializes Darknet model with a configuration path and optional image size, parsing and creating model
modules.
"""
- super(Darknet, self).__init__()
+ super().__init__()
self.module_defs = parse_model_config(config_path)
self.module_defs[0]["height"] = img_size
self.hyperparams, self.module_list = create_modules(self.module_defs)
@@ -294,7 +294,7 @@ def forward(self, x, targets=None, requestPrecision=False, weight=None, epoch=No
def parse_model_config(path):
"""Parses the yolo-v3 layer configuration file and returns module definitions."""
- file = open(path, "r")
+ file = open(path)
lines = file.read().split("\n")
lines = [x for x in lines if x and not x.startswith("#")]
lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces
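Note: the super(ClassName, self).__init__() -> super().__init__() edits in models.py rely on Python 3's zero-argument super(), which infers the class and instance from the enclosing method; behavior is unchanged. A standalone sketch (hypothetical classes, not from this repo):

    class Base:
        def __init__(self):
            self.ready = True

    class Child(Base):
        def __init__(self):
            super().__init__()  # same as super(Child, self).__init__()

    assert Child().ready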
diff --git a/scoring/matching.py b/scoring/matching.py
index d9f7580..57c0149 100644
--- a/scoring/matching.py
+++ b/scoring/matching.py
@@ -20,7 +20,7 @@
from scoring.rectangle import Rectangle
-class Matching(object):
+class Matching:
"""Matching class."""
def __init__(self, groundtruth_rects, rects):
diff --git a/scoring/rectangle.py b/scoring/rectangle.py
index 263538b..369c5f5 100644
--- a/scoring/rectangle.py
+++ b/scoring/rectangle.py
@@ -14,7 +14,7 @@
"""
-class Rectangle(object):
+class Rectangle:
"""Rectangle class."""
def __init__(self, xmin, ymin, xmax, ymax):
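Note: dropping the explicit (object) base in scoring/matching.py and scoring/rectangle.py is safe because every Python 3 class is a new-style class that inherits from object implicitly. A quick standalone check (illustrative, not repo code):

    class Old(object): ...
    class New: ...

    # both resolve to the same MRO root in Python 3
    assert Old.__mro__[-1] is object and New.__mro__[-1] is object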
diff --git a/scoring/score.py b/scoring/score.py
index 65fc493..6943c5f 100644
--- a/scoring/score.py
+++ b/scoring/score.py
@@ -66,7 +66,7 @@ def get_labels(fname):
coords = np.zeros((len(data["features"]), 4))
chips = np.zeros((len(data["features"])), dtype="object")
- classes = np.zeros((len(data["features"])))
+ classes = np.zeros(len(data["features"]))
for i in tqdm(range(len(data["features"]))):
if data["features"][i]["properties"]["bounds_imcoords"] != []:
@@ -169,7 +169,7 @@ def score(path_predictions, path_groundtruth, path_output, iou_threshold=0.5):
fname = file.split(".txt")[0]
pchips.append(fname)
- with open(path_predictions + file, "r") as f:
+ with open(path_predictions + file) as f:
arr = np.array(list(csv.reader(f, delimiter=" ")))
if arr.shape[0] == 0:
# If the file is empty, we fill it in with an array of zeros
@@ -202,13 +202,13 @@ def score(path_predictions, path_groundtruth, path_output, iou_threshold=0.5):
max_gt_cls = 100
if set(pchips).issubset(set(gt_unique)):
- raise ValueError("The prediction files {%s} are not in the ground truth." % str(set(pchips) - (set(gt_unique))))
+ raise ValueError(f"The prediction files {{{str(set(pchips) - (set(gt_unique)))}}} are not in the ground truth.")
print("Number of Predictions: %d" % num_preds)
print("Number of GT: %d" % np.sum(gt_classes.shape))
per_file_class_data = {i: [[], []] for i in gt_unique}
- num_gt_per_cls = np.zeros((max_gt_cls))
+ num_gt_per_cls = np.zeros(max_gt_cls)
attempted = np.zeros(100)
for file_ind in range(len(pchips)):
@@ -456,7 +456,9 @@ def score(path_predictions, path_groundtruth, path_output, iou_threshold=0.5):
vals["f1"] = 2 / ((1 / (np.spacing(1) + vals["map_score"])) + (1 / (np.spacing(1) + vals["mar_score"])))
print(
- "mAP: %f | mAP score: %f | mAR: %f | F1: %f" % (vals["map"], vals["map_score"], vals["mar_score"], vals["f1"])
+ "mAP: {:f} | mAP score: {:f} | mAR: {:f} | F1: {:f}".format(
+ vals["map"], vals["map_score"], vals["mar_score"], vals["f1"]
+ )
)
# with open(path_output + '/score.txt', 'w') as f:
@@ -464,7 +466,7 @@ def score(path_predictions, path_groundtruth, path_output, iou_threshold=0.5):
#
with open(f"{path_output}/metrics.txt", "w") as f:
for key, value in vals.items():
- f.write("%s %f\n" % (str(key), value))
+ f.write(f"{str(key)} {value:f}\n")
# for key in vals.keys():
# f.write("%f\n" % (vals[key]))
for i in range(len(v2)):
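Note: two smaller idioms in score.py: open() defaults to mode "r", so open(path) equals open(path, "r"); and np.zeros((n)) never built a tuple, since (n) is just a parenthesized scalar, so np.zeros(n) is the same 1-D call. A standalone sketch (assumed values, not repo data):

    import numpy as np

    n = 5
    assert np.zeros(n).shape == np.zeros((n,)).shape == (5,)  # (n) alone is not a tuple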
diff --git a/train.py b/train.py
index be9bf5e..09ca874 100644
--- a/train.py
+++ b/train.py
@@ -153,8 +153,8 @@ def main(opt):
k = (metrics[0] + metrics[2]) > 0
mean_recall = recall[k].mean() if k.sum() > 0 else 0
s = ("%10s%10s" + "%10.3g" * 14) % (
- "%g/%g" % (epoch, opt.epochs - 1),
- "%g/%g" % (i, len(dataloader) - 1),
+ f"{epoch:g}/{opt.epochs - 1:g}",
+ f"{i:g}/{len(dataloader) - 1:g}",
rloss["x"],
rloss["y"],
rloss["w"],
@@ -214,7 +214,7 @@ def main(opt):
# Save final model
dt = time.time() - t0
- print("Finished %g epochs in %.2fs (%.2fs/epoch)" % (epoch, dt, dt / (epoch + 1)))
+ print(f"Finished {epoch:g} epochs in {dt:.2f}s ({dt / (epoch + 1):.2f}s/epoch)")
if __name__ == "__main__":
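Note: in train.py the outer ("%10s%10s" + "%10.3g" * 14) template is kept and only its two leading string arguments become f-strings; the f-strings just build the "epoch/epochs" and "batch/batches" fragments that %10s then pads. A standalone sketch of that composition (illustrative numbers):

    epoch, epochs = 2, 100
    assert ("%10s" % f"{epoch:g}/{epochs - 1:g}") == "      2/99"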
diff --git a/utils/datasets.py b/utils/datasets.py
index 3753e3d..4efaed9 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -128,7 +128,7 @@ def __next__(self):
labels_all = []
for files_index in range(ia, ib):
# img_path = self.files[self.shuffled_vector[files_index]] # BGR
- img_path = "%s/%g.tif" % (self.path, self.shuffled_vector[files_index])
+ img_path = f"{self.path}/{self.shuffled_vector[files_index]:g}.tif"
# img_path = '/Users/glennjocher/Downloads/DATA/xview/train_images/2294.bmp'
img0 = cv2.imread(img_path)
@@ -383,7 +383,7 @@ def convert_tif2bmp(p="/Users/glennjocher/Downloads/DATA/xview/val_images_bmp"):
files = sorted(glob.glob(f"{p}/*.tif"))
for i, f in enumerate(files):
- print("%g/%g" % (i + 1, len(files)))
+ print(f"{i + 1:g}/{len(files):g}")
img = cv2.imread(f)
diff --git a/utils/utils.py b/utils/utils.py
index 155651e..07ed0b2 100755
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -12,7 +12,7 @@
def load_classes(path):
"""Loads class labels at 'path'."""
- fp = open(path, "r")
+ fp = open(path)
return fp.read().split("\n")[:-1]
@@ -26,7 +26,7 @@ def modelinfo(model):
print(
"%4g %70s %9s %12g %20s %12g %12g" % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())
)
- print("\n%g layers, %g parameters, %g gradients" % (i + 1, nparams, ngradients))
+ print(f"\n{i + 1:g} layers, {nparams:g} parameters, {ngradients:g} gradients")
def xview_classes2indices(classes): # remap xview classes 11-94 to 0-61
@@ -698,9 +698,9 @@ def secondary_class_detection(x, y, w, h, img, model, device):
with torch.no_grad():
classes = []
nB = n // 1000 + 1
- print("%g batches..." % nB, end="")
+ print(f"{nB:g} batches...", end="")
for i in range(nB):
- print("%g " % i, end="")
+ print(f"{i:g} ", end="")
j0 = int(i * 1000)
j1 = int(min(j0 + 1000, n))
im = images[j0:j1]
@@ -729,9 +729,9 @@ def createChips():
print(counter)
if platform == "darwin": # macos
- img = cv2.imread("/Users/glennjocher/Downloads/DATA/xview/train_images/%g.tif" % i)
+ img = cv2.imread(f"/Users/glennjocher/Downloads/DATA/xview/train_images/{i:g}.tif")
else: # gcp
- img = cv2.imread("../train_images/%g.tif" % i)
+ img = cv2.imread(f"../train_images/{i:g}.tif")
for j in np.nonzero(mat["id"] == i)[0]:
c, x1, y1, x2, y2 = mat["targets"][j]
diff --git a/utils/utils_xview.py b/utils/utils_xview.py
index e210f4e..813e3f2 100644
--- a/utils/utils_xview.py
+++ b/utils/utils_xview.py
@@ -6,7 +6,7 @@
def xview_class2name(classes):
"""Converts numerical class IDs to their corresponding names using 'data/xview.names'."""
- with open("data/xview.names", "r") as f:
+ with open("data/xview.names") as f:
x = f.readlines()
return x[classes].replace("\n", "")