-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathgenerator.py
69 lines (57 loc) · 1.78 KB
/
generator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
from pathlib import Path
import albumentations as A
import cv2
import numpy as np
from tensorflow.keras.utils import Sequence
# Training-time augmentation pipeline (applied per-image when the data
# generator runs in "train" mode): small shifts/scales/rotations with a
# black constant border, mild brightness/contrast jitter, and random
# horizontal flips.
_augmentation_steps = [
    A.ShiftScaleRotate(
        shift_limit=0.03125,  # up to ~3% translation
        scale_limit=0.20,     # up to +/-20% zoom
        rotate_limit=20,      # up to +/-20 degrees
        border_mode=cv2.BORDER_CONSTANT,
        value=0,              # fill exposed border pixels with black
        p=1.0,
    ),
    A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
    A.HorizontalFlip(p=0.5),
]
transforms = A.Compose(_augmentation_steps)
class ImageSequence(Sequence):
    """Keras ``Sequence`` yielding batches of ``(images, (genders, ages))``.

    Reads cropped images listed in ``df`` from ``<repo_root>/data/<db>_crop``
    and, in training mode, applies the module-level albumentations
    ``transforms`` pipeline to each image.

    Args:
        cfg: config object exposing ``cfg.train.batch_size``,
            ``cfg.data.db`` and ``cfg.model.img_size``.
        df: DataFrame with columns ``img_paths``, ``genders`` and ``ages``.
        mode: ``"train"`` enables augmentation and epoch-end shuffling;
            any other value yields deterministic, unaugmented batches.
    """

    def __init__(self, cfg, df, mode):
        super().__init__()  # required by newer Keras Sequence / PyDataset
        self.df = df
        self.indices = np.arange(len(df))
        self.batch_size = cfg.train.batch_size
        # Resolve <repo_root>/data/<db>_crop relative to this file so the
        # generator works regardless of the current working directory.
        self.img_dir = (
            Path(__file__)
            .resolve()
            .parents[1]
            .joinpath("data", f"{cfg.data.db}_crop")
        )
        self.img_size = cfg.model.img_size
        self.mode = mode

    def __getitem__(self, idx):
        """Return batch ``idx`` as ``(imgs, (genders, ages))`` numpy arrays.

        Raises:
            FileNotFoundError: if an image file is missing or unreadable.
        """
        sample_indices = self.indices[
            idx * self.batch_size : (idx + 1) * self.batch_size
        ]
        imgs = []
        genders = []
        ages = []
        for _, row in self.df.iloc[sample_indices].iterrows():
            img_path = self.img_dir.joinpath(row["img_paths"])
            img = cv2.imread(str(img_path))
            if img is None:
                # cv2.imread silently returns None on a missing/corrupt
                # file; fail loudly instead of crashing inside cv2.resize.
                raise FileNotFoundError(f"could not read image: {img_path}")
            img = cv2.resize(img, (self.img_size, self.img_size))
            if self.mode == "train":
                img = transforms(image=img)["image"]
            imgs.append(img)
            genders.append(row["genders"])
            ages.append(row["ages"])
        imgs = np.asarray(imgs)
        genders = np.asarray(genders)
        ages = np.asarray(ages)
        return imgs, (genders, ages)

    def __len__(self):
        # Floor division: a trailing partial batch (fewer than batch_size
        # samples) is dropped each epoch.
        return len(self.df) // self.batch_size

    def on_epoch_end(self):
        # Reshuffle only in training mode so validation/test batch order
        # stays deterministic across epochs (the original shuffled
        # unconditionally, making eval order non-reproducible).
        if self.mode == "train":
            np.random.shuffle(self.indices)