Fix problematic header read/write for *.weights file #842

Open · wants to merge 3 commits into master
config/create_custom_model.sh (6 changes: 2 additions & 4 deletions)
@@ -2,8 +2,7 @@

 NUM_CLASSES=$1

-echo "
-[net]
+echo "[net]
 # Testing
 #batch=1
 #subdivisions=1
@@ -790,5 +789,4 @@ num=9
 jitter=.3
 ignore_thresh = .7
 truth_thresh = 1
-random=1
-" >> yolov3-custom.cfg
+random=1" > yolov3-custom.cfg
pyproject.toml (2 changes: 1 addition & 1 deletion)
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "PyTorchYolo"
-version = "1.8.0"
+version = "1.8.1"
 readme = "README.md"
 repository = "https://github.com/eriklindernoren/PyTorch-YOLOv3"
 description = "Minimal PyTorch implementation of YOLO"
pytorchyolo/models.py (24 changes: 16 additions & 8 deletions)
@@ -199,8 +199,10 @@ def __init__(self, config_path):
         self.hyperparams, self.module_list = create_modules(self.module_defs)
         self.yolo_layers = [layer[0]
                             for layer in self.module_list if isinstance(layer[0], YOLOLayer)]
-        self.seen = 0
-        self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
+        # NOTE: A *.weights header holds four fields: major, minor, revision and seen.
+        # [major, minor, revision] are always 4 bytes each (np.int32 here, int in darknet's C).
+        # [seen] is 8 bytes (size_t in darknet, np.uint64 here) for header versions >= 0.2, and 4 bytes otherwise.
+        self.seen = np.uint64(0)

     def forward(self, x):
         img_size = x.size(2)
@@ -227,10 +229,15 @@ def load_darknet_weights(self, weights_path):

         # Open the weights file
         with open(weights_path, "rb") as f:
-            # First five are header values
-            header = np.fromfile(f, dtype=np.int32, count=5)
-            self.header_info = header  # Needed to write header when saving weights
-            self.seen = header[3]  # number of images seen during training
+            # Version fields; needed to pick the width of the seen field below
+            header_version = np.fromfile(f, dtype=np.int32, count=3)
+            major, minor, _ = header_version
+            # Number of images seen during training: uint64 from header version 0.2 on, int32 before
+            if (major * 10 + minor) >= 2 and major < 1000 and minor < 1000:
+                seen = np.fromfile(f, dtype=np.uint64, count=1)[0]
+            else:
+                seen = np.fromfile(f, dtype=np.int32, count=1)[0]
+            self.seen = np.uint64(seen)  # widen explicitly; int32 -> uint64 fails astype under the 'safe' casting rule
             weights = np.fromfile(f, dtype=np.float32)  # The rest are weights

         # Establish cutoff for loading backbone weights
@@ -294,8 +301,9 @@ def save_darknet_weights(self, path, cutoff=-1):
         @:param cutoff - save layers between 0 and cutoff (cutoff = -1 -> all are saved)
         """
         fp = open(path, "wb")
-        self.header_info[3] = self.seen
-        self.header_info.tofile(fp)
+        header_version = np.array([0, 2, 0], dtype=np.int32)  # always write a version 0.2.0 header, so seen goes out as 8 bytes
+        header_version.tofile(fp)
+        self.seen.tofile(fp)

         # Iterate through layers
         for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
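For reference, the header convention these changes implement can be exercised in isolation. The sketch below is illustrative rather than part of the PR (the helper names read_darknet_header and write_darknet_header are made up for this example), but the field layout follows the same rule as the diff above: three int32 version fields, then a seen counter whose width depends on the version.

import numpy as np

def read_darknet_header(f):
    # Three 4-byte version fields: major, minor, revision
    major, minor, revision = np.fromfile(f, dtype=np.int32, count=3)
    # From header version 0.2 on, seen is stored as 8 bytes; before that, 4
    if (major * 10 + minor) >= 2 and major < 1000 and minor < 1000:
        seen = np.fromfile(f, dtype=np.uint64, count=1)[0]
    else:
        seen = np.fromfile(f, dtype=np.int32, count=1)[0]
    return (int(major), int(minor), int(revision)), np.uint64(seen)

def write_darknet_header(f, seen):
    # Always emit a 0.2.0 header, so seen goes out as uint64
    np.array([0, 2, 0], dtype=np.int32).tofile(f)
    np.uint64(seen).tofile(f)

A quick round trip (np.fromfile needs a real file object, not a BytesIO):

import tempfile

with tempfile.TemporaryFile() as f:
    write_darknet_header(f, seen=64000)
    f.seek(0)
    version, seen = read_darknet_header(f)
    assert version == (0, 2, 0) and seen == np.uint64(64000)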
pytorchyolo/train.py (3 changes: 2 additions & 1 deletion)
@@ -5,6 +5,7 @@
 import os
 import argparse
 import tqdm
+import numpy as np

 import torch
 from torch.utils.data import DataLoader
@@ -215,7 +216,7 @@ def run():
                     ("train/loss", to_cpu(loss).item())]
                 logger.list_of_scalars_summary(tensorboard_log, batches_done)

-            model.seen += imgs.size(0)
+            model.seen += np.uint64(imgs.size(0))

             # #############
             # Save progress
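Why the np.uint64 wrapper matters: under NumPy 1.x promotion rules there is no common integer type for uint64 and a signed Python int, so model.seen += imgs.size(0) would silently promote the counter to float64, and the later self.seen.tofile(fp) would then write 8 bytes of float instead of the uint64 count the header format expects. Wrapping the batch size keeps the addition in uint64.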