commit 06495eb (0 parents): 56 changed files with 9,013 additions and 0 deletions. This commit does not belong to any branch on this repository and may belong to a fork outside of the repository.
Dockerfile (new file, +52 lines)
# Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
FROM nvcr.io/nvidia/pytorch:21.03-py3

# Install linux packages
RUN apt update && apt install -y zip htop screen libgl1-mesa-glx

# Install python dependencies
COPY requirements.txt .
RUN python -m pip install --upgrade pip
RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof
RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook

# Create working directory
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app

# Copy contents
COPY . /usr/src/app

# Set environment variables
ENV HOME=/usr/src/app


# --------------------------------------------------- Extras Below ---------------------------------------------------

# Build and Push
# t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t
# for v in {300..303}; do t=ultralytics/coco:v$v && sudo docker build -t $t . && sudo docker push $t; done

# Pull and Run
# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t

# Pull and Run with local directory access
# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/coco:/usr/src/coco $t

# Kill all
# sudo docker kill $(sudo docker ps -q)

# Kill all image-based
# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)

# Bash into running container
# sudo docker exec -it 5a9b5863d93d bash

# Bash into stopped container
# id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash

# Send weights to GCP
# python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt

# Clean up
# docker system prune -a --volumes
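A usage sketch, not part of this commit: the image built from this Dockerfile copies the repository into /usr/src/app, so inference can be launched directly inside the container. The tag and docker flags follow the "Pull and Run" examples above; the weights file and source folder are illustrative, and yolov5s.pt should be fetched automatically by attempt_download if it is not present.

t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t python detect.py --weights yolov5s.pt --source data/images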
detect.py (new file, +178 lines)
import argparse
import time
from pathlib import Path

import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random

from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized


def detect(save_img=False):
    source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
    save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
    webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
        ('rtsp://', 'rtmp://', 'http://', 'https://'))

    # Directories
    save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Initialize
    set_logging()
    device = select_device(opt.device)
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size
    if half:
        model.half()  # to FP16

    # Second-stage classifier
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
        modelc.to(device).eval()

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = check_imshow()
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz, stride=stride)
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    # Run inference
    if device.type != 'cpu':
        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
    t0 = time.time()
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=opt.augment)[0]

        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
        t2 = time_synchronized()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
            else:
                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # img.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')

                    if save_img or view_img:  # Add bbox to image
                        label = f'{names[int(cls)]} {conf:.2f}'
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)

            # Print time (inference + NMS)
            print(f'{s}Done. ({t2 - t1:.3f}s)')

            # Stream results
            if view_img:
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                            save_path += '.mp4'
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")

    print(f'Done. ({time.time() - t0:.3f}s)')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='data/images', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    opt = parser.parse_args()
    print(opt)
    check_requirements(exclude=('pycocotools', 'thop'))

    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
                detect()
                strip_optimizer(opt.weights)
        else:
            detect()
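Invocation sketches based on the argument parser above; the file paths, stream URL, and class filter are illustrative and not part of this commit:

python detect.py --weights yolov5s.pt --source data/images                       # folder of images
python detect.py --weights yolov5s.pt --source 0 --view-img                      # webcam 0
python detect.py --weights yolov5s.pt --source video.mp4 --save-txt --save-conf  # video, save labels with confidences
python detect.py --weights yolov5s.pt --source rtsp://example.com/stream --conf-thres 0.4 --classes 0  # stream, keep class 0 only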
hubconf.py (new file, +137 lines)
"""File for accessing YOLOv5 models via PyTorch Hub https://pytorch.org/hub/ultralytics_yolov5/ | ||
Usage: | ||
import torch | ||
model = torch.hub.load('ultralytics/yolov5', 'yolov5s') | ||
""" | ||
|
||
from pathlib import Path | ||
|
||
import torch | ||
|
||
from models.yolo import Model | ||
from utils.general import check_requirements, set_logging | ||
from utils.google_utils import attempt_download | ||
from utils.torch_utils import select_device | ||
|
||
dependencies = ['torch', 'yaml'] | ||
check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('pycocotools', 'thop')) | ||
set_logging() | ||
|
||
|
||
def create(name, pretrained, channels, classes, autoshape): | ||
"""Creates a specified YOLOv5 model | ||
Arguments: | ||
name (str): name of model, i.e. 'yolov5s' | ||
pretrained (bool): load pretrained weights into the model | ||
channels (int): number of input channels | ||
classes (int): number of model classes | ||
Returns: | ||
pytorch model | ||
""" | ||
config = Path(__file__).parent / 'models' / f'{name}.yaml' # model.yaml path | ||
try: | ||
model = Model(config, channels, classes) | ||
if pretrained: | ||
fname = f'{name}.pt' # checkpoint filename | ||
attempt_download(fname) # download if not found locally | ||
ckpt = torch.load(fname, map_location=torch.device('cpu')) # load | ||
msd = model.state_dict() # model state_dict | ||
csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 | ||
csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter | ||
model.load_state_dict(csd, strict=False) # load | ||
if len(ckpt['model'].names) == classes: | ||
model.names = ckpt['model'].names # set class names attribute | ||
if autoshape: | ||
model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS | ||
device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available | ||
return model.to(device) | ||
|
||
except Exception as e: | ||
help_url = 'https://github.com/ultralytics/yolov5/issues/36' | ||
        s = 'Cache may be out of date, try force_reload=True. See %s for help.' % help_url
        raise Exception(s) from e


def custom(path_or_model='path/to/model.pt', autoshape=True):
    """YOLOv5-custom model https://github.com/ultralytics/yolov5

    Arguments (3 options):
        path_or_model (str): 'path/to/model.pt'
        path_or_model (dict): torch.load('path/to/model.pt')
        path_or_model (nn.Module): torch.load('path/to/model.pt')['model']

    Returns:
        pytorch model
    """
    model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model  # load checkpoint
    if isinstance(model, dict):
        model = model['ema' if model.get('ema') else 'model']  # load model

    hub_model = Model(model.yaml).to(next(model.parameters()).device)  # create
    hub_model.load_state_dict(model.float().state_dict())  # load state_dict
    hub_model.names = model.names  # class names
    if autoshape:
        hub_model = hub_model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
    device = select_device('0' if torch.cuda.is_available() else 'cpu')  # default to GPU if available
    return hub_model.to(device)


def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True):
    # YOLOv5-small model https://github.com/ultralytics/yolov5
    return create('yolov5s', pretrained, channels, classes, autoshape)


def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True):
    # YOLOv5-medium model https://github.com/ultralytics/yolov5
    return create('yolov5m', pretrained, channels, classes, autoshape)


def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True):
    # YOLOv5-large model https://github.com/ultralytics/yolov5
    return create('yolov5l', pretrained, channels, classes, autoshape)


def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True):
    # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
    return create('yolov5x', pretrained, channels, classes, autoshape)


def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True):
    # YOLOv5-small model https://github.com/ultralytics/yolov5
    return create('yolov5s6', pretrained, channels, classes, autoshape)


def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True):
    # YOLOv5-medium model https://github.com/ultralytics/yolov5
    return create('yolov5m6', pretrained, channels, classes, autoshape)


def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True):
    # YOLOv5-large model https://github.com/ultralytics/yolov5
    return create('yolov5l6', pretrained, channels, classes, autoshape)


def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True):
    # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
    return create('yolov5x6', pretrained, channels, classes, autoshape)


if __name__ == '__main__':
    model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True)  # pretrained example
    # model = custom(path_or_model='path/to/model.pt')  # custom example

    # Verify inference
    import numpy as np
    from PIL import Image

    imgs = [Image.open('data/images/bus.jpg'),  # PIL
            'data/images/zidane.jpg',  # filename
            'https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg',  # URI
            np.zeros((640, 480, 3))]  # numpy

    results = model(imgs)  # batched inference
    results.print()
    results.save()
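The verification block at the end of this file can also be exercised directly; a minimal sketch, assuming the repository root is the working directory, data/images is present, and the yolov5s.pt checkpoint can be downloaded:

python hubconf.py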