Commit

UltralyticsAssistant committed Aug 26, 2024
1 parent 38e96c2 commit 8a72424
Showing 15 changed files with 25 additions and 27 deletions.
README.md (2 changes: 0 additions & 2 deletions)
@@ -20,8 +20,6 @@

YOLOv3 🚀 is the world's most loved vision AI, representing <a href="https://ultralytics.com">Ultralytics</a> open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

-
-
We hope that the resources here will help you get the most out of YOLOv3. Please browse the YOLOv3 <a href="https://docs.ultralytics.com/yolov5">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/yolov3/issues/new/choose">GitHub</a> for support, and join our <a href="https://ultralytics.com/discord">Discord</a> community for questions and discussions!

To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
export.py (4 changes: 2 additions & 2 deletions)
@@ -1,6 +1,6 @@
# Ultralytics YOLOv3 🚀, AGPL-3.0 license
"""
- Export a YOLOv3 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
+ Export a YOLOv3 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.
Format | `export.py --include` | Model
--- | --- | ---
@@ -1562,7 +1562,7 @@ def parse_opt(known=False):


def main(opt):
- """Run(**vars(opt))"""
+ """Run(**vars(opt))."""
for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
run(**vars(opt))

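The hunk above only touches the module docstring, but for orientation, the export entry point it documents can also be driven programmatically. A minimal sketch under assumptions (a local yolov3.pt checkpoint and the run() keyword names used elsewhere in this repository's export.py; not part of this commit):

# Hypothetical programmatic use of export.py's run(); checkpoint path and format list are assumed examples
from export import run

run(weights="yolov3.pt", include=("torchscript", "onnx"))  # exported files are written alongside the checkpoint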
hubconf.py (2 changes: 1 addition & 1 deletion)
@@ -1,6 +1,6 @@
# Ultralytics YOLOv3 🚀, AGPL-3.0 license
"""
- PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5
+ PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5.
Usage:
import torch
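Since the docstring edited above advertises the PyTorch Hub entry points, a short usage sketch may help; it assumes internet access on the first call and the standard "yolov3" Hub model name:

import torch

# Load a pretrained YOLOv3 model via PyTorch Hub (model name and image URL are illustrative)
model = torch.hub.load("ultralytics/yolov3", "yolov3", pretrained=True)
results = model("https://ultralytics.com/images/zidane.jpg")  # inference on a single image
results.print()  # print detected classes and confidences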
models/tf.py (2 changes: 1 addition & 1 deletion)
@@ -1,7 +1,7 @@
# Ultralytics YOLOv3 🚀, AGPL-3.0 license
"""
TensorFlow, Keras and TFLite versions of YOLOv3
- Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127
+ Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127.
Usage:
$ python models/tf.py --weights yolov5s.pt
segment/val.py (2 changes: 1 addition & 1 deletion)
@@ -115,7 +115,7 @@ def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, over
detections (array[N, 6]), x1, y1, x2, y2, conf, class
labels (array[M, 5]), class, x1, y1, x2, y2
Returns:
- correct (array[N, 10]), for 10 IoU levels
+ correct (array[N, 10]), for 10 IoU levels.
"""
if masks:
if overlap:
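For readers of the process_batch docstring above, the box-only matching it describes is typically implemented roughly as below. This is a sketch, not the body of the function in this file, and it assumes torch tensor inputs plus the box_iou helper from utils/metrics.py:

import numpy as np
import torch

from utils.metrics import box_iou  # pairwise IoU helper assumed from the same codebase


def match_detections(detections, labels, iouv):
    """Sketch: correct[i, j] is True if detection i matches a same-class label at IoU >= iouv[j]."""
    correct = np.zeros((detections.shape[0], iouv.shape[0]), dtype=bool)
    iou = box_iou(labels[:, 1:], detections[:, :4])      # [M, N] IoU between labels and detections
    correct_class = labels[:, 0:1] == detections[:, 5]   # [M, N] class agreement
    for j, thr in enumerate(iouv):
        m, n = torch.where((iou >= thr) & correct_class)  # candidate (label, detection) pairs
        if m.shape[0]:
            matches = torch.stack((m.float(), n.float(), iou[m, n]), 1).cpu().numpy()
            matches = matches[matches[:, 2].argsort()[::-1]]                   # best IoU first
            matches = matches[np.unique(matches[:, 1], return_index=True)[1]]  # one label per detection
            matches = matches[np.unique(matches[:, 0], return_index=True)[1]]  # one detection per label
            correct[matches[:, 1].astype(int), j] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)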
utils/dataloaders.py (4 changes: 2 additions & 2 deletions)
@@ -92,7 +92,7 @@ def exif_size(img):
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
- Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
+ Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose().
:param image: The image to transpose.
:return: An image.
@@ -1052,7 +1052,7 @@ def extract_boxes(path=DATASETS_DIR / "coco128"): # from utils.dataloaders impo

def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0.0), annotated_only=False):
"""Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
- Usage: from utils.dataloaders import *; autosplit()
+ Usage: from utils.dataloaders import *; autosplit().
Arguments:
path: Path to images directory
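The autosplit docstring above is terse, so here is a self-contained sketch of the train/val/test splitting it describes; the file-type filter and output naming are assumptions mirroring the docstring, not code taken from this commit:

import random
from pathlib import Path


def autosplit_sketch(path="coco128/images", weights=(0.9, 0.1, 0.0), seed=0):
    """Sketch: assign each image to train/val/test with the given probabilities and
    append its relative path to autosplit_train/val/test.txt next to the images folder."""
    path = Path(path)
    files = sorted(p for p in path.rglob("*.*") if p.suffix.lower() in {".jpg", ".jpeg", ".png", ".bmp"})
    random.seed(seed)
    indices = random.choices([0, 1, 2], weights=weights, k=len(files))  # one split index per image
    txt = ["autosplit_train.txt", "autosplit_val.txt", "autosplit_test.txt"]
    for name in txt:
        (path.parent / name).unlink(missing_ok=True)  # start from clean split files
    for i, img in zip(indices, files):
        with open(path.parent / txt[i], "a") as f:
            f.write(f"./{img.relative_to(path.parent).as_posix()}\n")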
utils/loggers/__init__.py (2 changes: 1 addition & 1 deletion)
@@ -339,7 +339,7 @@ def on_params_update(self, params: dict):
class GenericLogger:
"""
YOLOv3 General purpose logger for non-task specific logging
- Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...)
+ Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...).
Arguments:
opt: Run arguments
utils/loggers/clearml/clearml_utils.py (2 changes: 1 addition & 1 deletion)
@@ -76,7 +76,7 @@ class ClearmlLogger:
def __init__(self, opt, hyp):
"""
- Initialize ClearML Task, this object will capture the experiment
- - Upload dataset version to ClearML Data if opt.upload_dataset is True
+ - Upload dataset version to ClearML Data if opt.upload_dataset is True.
Arguments:
opt (namespace) -- Commandline arguments for this run
utils/loggers/wandb/wandb_utils.py (4 changes: 2 additions & 2 deletions)
@@ -47,7 +47,7 @@ def __init__(self, opt, run_id=None, job_type="Training"):
"""
- Initialize WandbLogger instance
- Upload dataset if opt.upload_dataset is True
- - Setup training processes if job_type is 'Training'
+ - Setup training processes if job_type is 'Training'.
Arguments:
opt (namespace) -- Commandline arguments for this run
@@ -88,7 +88,7 @@ def setup_training(self, opt):
Setup the necessary processes for training YOLO models:
- Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
- Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
- - Setup log_dict, initialize bbox_interval
+ - Setup log_dict, initialize bbox_interval.
Arguments:
opt (namespace) -- commandline arguments for this run
utils/metrics.py (2 changes: 1 addition & 1 deletion)
@@ -100,7 +100,7 @@ def compute_ap(recall, precision):
recall: The recall curve (list)
precision: The precision curve (list)
# Returns
- Average precision, precision curve, recall curve
+ Average precision, precision curve, recall curve.
"""
# Append sentinel values to beginning and end
mrec = np.concatenate(([0.0], recall, [1.0]))
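To make the compute_ap docstring above concrete, the AP calculation it summarizes usually proceeds as in this sketch (sentinel values, precision envelope, 101-point interpolation); treat it as an illustration rather than the exact body of utils/metrics.py:

import numpy as np


def compute_ap_sketch(recall, precision):
    """Sketch: average precision from recall/precision curves."""
    mrec = np.concatenate(([0.0], recall, [1.0]))         # append sentinel values
    mpre = np.concatenate(([1.0], precision, [0.0]))
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))  # precision envelope (monotonically non-increasing)
    x = np.linspace(0, 1, 101)                            # 101-point interpolation (COCO-style)
    ap = np.trapz(np.interp(x, mrec, mpre), x)            # area under the interpolated curve
    return ap, mpre, mrec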
utils/plots.py (2 changes: 1 addition & 1 deletion)
@@ -77,7 +77,7 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path("runs/detec
module_type: Module type
stage: Module stage within model
n: Maximum number of feature maps to plot
- save_dir: Directory to save results
+ save_dir: Directory to save results.
"""
if "Detect" not in module_type:
batch, channels, height, width = x.shape # batch, channels, height, width
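The feature_visualization parameters documented above map onto a fairly standard routine: take the first image in the batch, split the tensor into per-channel maps, and save up to n of them as a grid. A sketch under those assumptions (grid shape and filename pattern are illustrative):

import math
from pathlib import Path

import matplotlib.pyplot as plt
import torch


def feature_visualization_sketch(x, module_type, stage, n=32, save_dir=Path("runs/detect/exp")):
    """Sketch: save up to n feature maps of the first batch element as an image grid."""
    if "Detect" in module_type:  # skip detection-head outputs, as in the function above
        return
    _, channels, height, width = x.shape
    if height > 1 and width > 1:
        blocks = torch.chunk(x[0].cpu(), channels, dim=0)  # one [1, H, W] block per channel
        n = min(n, channels)
        fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)
        ax = ax.ravel()
        for i in range(n):
            ax[i].imshow(blocks[i].squeeze().detach().numpy())
            ax[i].axis("off")
        save_dir.mkdir(parents=True, exist_ok=True)
        fig.savefig(save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png", dpi=300, bbox_inches="tight")
        plt.close(fig)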
utils/segment/general.py (12 changes: 6 additions & 6 deletions)
@@ -28,7 +28,7 @@ def process_mask_upsample(protos, masks_in, bboxes, shape):
protos: [mask_dim, mask_h, mask_w]
masks_in: [n, mask_dim], n is number of masks after nms
bboxes: [n, 4], n is number of masks after nms
- shape: input_image_size, (h, w)
+ shape: input_image_size, (h, w).
return: h, w, n
"""
@@ -45,7 +45,7 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False):
proto_out: [mask_dim, mask_h, mask_w]
out_masks: [n, mask_dim], n is number of masks after nms
bboxes: [n, 4], n is number of masks after nms
- shape:input_image_size, (h, w)
+ shape:input_image_size, (h, w).
return: h, w, n
"""
@@ -71,7 +71,7 @@ def process_mask_native(protos, masks_in, bboxes, shape):
protos: [mask_dim, mask_h, mask_w]
masks_in: [n, mask_dim], n is number of masks after nms
bboxes: [n, 4], n is number of masks after nms
- shape: input_image_size, (h, w)
+ shape: input_image_size, (h, w).
return: h, w, n
"""
@@ -92,7 +92,7 @@ def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
"""
img1_shape: model input shape, [h, w]
img0_shape: origin pic shape, [h, w, 3]
- masks: [h, w, num]
+ masks: [h, w, num].
"""
# Rescale coordinates (xyxy) from im1_shape to im0_shape
if ratio_pad is None: # calculate from im0_shape
@@ -120,7 +120,7 @@ def mask_iou(mask1, mask2, eps=1e-7):
"""
mask1: [N, n] m1 means number of predicted objects
mask2: [M, n] m2 means number of gt objects
- Note: n means image_w x image_h
+ Note: n means image_w x image_h.
return: masks iou, [N, M]
"""
@@ -133,7 +133,7 @@ def masks_iou(mask1, mask2, eps=1e-7):
"""
mask1: [N, n] m1 means number of predicted objects
mask2: [N, n] m2 means number of gt objects
- Note: n means image_w x image_h
+ Note: n means image_w x image_h.
return: masks iou, (N, )
"""
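Several docstrings in this file describe masks flattened to shape [N, n] with n = image_w x image_h; with that layout the pairwise mask IoU reduces to a matrix product, as in this sketch (not the literal body of mask_iou):

import torch


def mask_iou_sketch(mask1, mask2, eps=1e-7):
    """Sketch: pairwise IoU between N and M flattened binary masks, returned as an [N, M] matrix."""
    intersection = torch.matmul(mask1, mask2.T).clamp(0)               # [N, M] overlapping pixels
    union = mask1.sum(1)[:, None] + mask2.sum(1)[None] - intersection  # [N, M] total covered pixels
    return intersection / (union + eps)

The per-pair variant described for masks_iou, which returns a vector of shape (N,), follows the same idea with an element-wise product and row-wise sums instead of a matrix product.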
utils/segment/metrics.py (6 changes: 3 additions & 3 deletions)
@@ -125,7 +125,7 @@ def mean_results(self):
return (self.mp, self.mr, self.map50, self.map)

def class_result(self, i):
- """Class-aware result, return p[i], r[i], ap50[i], ap[i]"""
+ """Class-aware result, return p[i], r[i], ap50[i], ap[i]."""
return (self.p[i], self.r[i], self.ap50[i], self.ap[i])

def get_maps(self, nc):
@@ -140,7 +140,7 @@ def get_maps(self, nc):
def update(self, results):
"""
Args:
- results: tuple(p, r, ap, f1, ap_class)
+ results: tuple(p, r, ap, f1, ap_class).
"""
p, r, all_ap, f1, ap_class_index = results
self.p = p
@@ -161,7 +161,7 @@ def __init__(self) -> None:
def update(self, results):
"""
Args:
- results: Dict{'boxes': Dict{}, 'masks': Dict{}}
+ results: Dict{'boxes': Dict{}, 'masks': Dict{}}.
"""
self.metric_box.update(list(results["boxes"].values()))
self.metric_mask.update(list(results["masks"].values()))
utils/torch_utils.py (4 changes: 2 additions & 2 deletions)
@@ -165,7 +165,7 @@ def profile(input, ops, n=10, device=None):
input = torch.randn(16, 3, 640, 640)
m1 = lambda x: x * torch.sigmoid(x)
m2 = nn.SiLU()
- profile(input, [m1, m2], n=100) # profile over 100 iterations
+ profile(input, [m1, m2], n=100) # profile over 100 iterations.
"""
results = []
if not isinstance(device, torch.device):
@@ -453,7 +453,7 @@ def __call__(self, epoch, fitness):
class ModelEMA:
"""Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models
Keeps a moving average of everything in the model state_dict (parameters and buffers)
- For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
+ For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage.
"""

def __init__(self, model, decay=0.9999, tau=2000, updates=0):
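The ModelEMA docstring touched above only gains a period, but the averaging scheme it refers to is easy to sketch: keep a frozen copy of the model and blend its state_dict toward the live weights, with a decay that ramps up over roughly the first tau updates. A simplified sketch (it omits any DataParallel unwrapping the real class may do):

import math
from copy import deepcopy

import torch


class ModelEMASketch:
    """Sketch: exponential moving average of a model's parameters and buffers."""

    def __init__(self, model, decay=0.9999, tau=2000, updates=0):
        self.ema = deepcopy(model).eval()  # frozen shadow copy holding the averaged weights
        self.updates = updates
        self.decay = lambda x: decay * (1 - math.exp(-x / tau))  # ramps from 0 toward `decay`
        for p in self.ema.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model):
        self.updates += 1
        d = self.decay(self.updates)
        msd = model.state_dict()
        for k, v in self.ema.state_dict().items():
            if v.dtype.is_floating_point:
                v *= d
                v += (1 - d) * msd[k].detach()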
utils/triton.py (2 changes: 1 addition & 1 deletion)
@@ -18,7 +18,7 @@ class TritonRemoteModel:
def __init__(self, url: str):
"""
Keyword Arguments:
- url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000
+ url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000.
"""
parsed_url = urlparse(url)
if parsed_url.scheme == "grpc":
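The __init__ docstring above shows that the server address carries its protocol in the URL scheme. A sketch of that dispatch, assuming the tritonclient package (grpc and http submodules) is installed; this is illustrative, not the class's actual constructor:

from urllib.parse import urlparse


def make_triton_client(url: str):
    """Sketch: pick a gRPC or HTTP Triton client based on the URL scheme, e.g. grpc://localhost:8000."""
    parsed = urlparse(url)
    if parsed.scheme == "grpc":
        from tritonclient.grpc import InferenceServerClient  # assumed optional dependency
    else:
        from tritonclient.http import InferenceServerClient  # assumed optional dependency
    return InferenceServerClient(parsed.netloc)  # e.g. "localhost:8000"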
