-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmelanomia_dataset.py
82 lines (58 loc) · 2.38 KB
/
melanomia_dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
from os import listdir
from os.path import join
from typing import Dict, Tuple
import numpy as np
from numpy.core.numeric import outer
import torch
from torch import Tensor
from torch.utils.data import Dataset
from PIL.Image import open, Image
class MelanomiaDataset(Dataset):
    """Paired image / segmentation-mask dataset for melanoma lesions.

    Images and masks are matched positionally after sorting both
    directory listings by filename, so the two directories must contain
    corresponding files whose names sort identically.
    """

    def __init__(self, image_directory: str, mask_directory: str,
                 scale: float = 1, scale_mask: bool = True):
        """Index the image and mask directories.

        Args:
            image_directory (str): directory containing the input images.
            mask_directory (str): directory containing the binary masks.
            scale (float): resize factor applied to each image (and mask,
                if ``scale_mask``) on load. Defaults to 1 (no resize... beyond
                the int truncation of width/height).
            scale_mask (bool): whether masks are resized alongside images.

        Raises:
            ValueError: if the two directories hold different file counts.
        """
        self.scale = scale
        self.scale_mask = scale_mask
        self.image_directory = image_directory
        self.mask_directory = mask_directory
        # Sort both listings: listdir order is arbitrary, and pairing
        # below is purely positional.
        self.images = sorted(listdir(image_directory))
        self.masks = sorted(listdir(mask_directory))
        # A count mismatch would silently pair wrong files — fail fast.
        if len(self.images) != len(self.masks):
            raise ValueError(
                f"image/mask count mismatch: {len(self.images)} images vs "
                f"{len(self.masks)} masks")

    def __len__(self) -> int:
        """Number of image/mask pairs."""
        return len(self.images)

    def __getitem__(self, idx: int) -> Dict[str, Tensor]:
        """Return the scaled image/mask pair at the given index.

        Args:
            idx (int): index into the sorted file listings.

        Returns:
            Dict[str, Tensor]: keys ``"image"`` (CxHxW float tensor in
            [0, 1]), ``"mask"`` (1xHxW float tensor in [0, 1]) and
            ``"id"`` (the image filename, a str).
        """
        image_path = join(self.image_directory, self.images[idx])
        mask_path = join(self.mask_directory, self.masks[idx])
        # `open` here is PIL.Image.open (imported at file level); the
        # context managers release the underlying file handles.
        with open(image_path) as image, open(mask_path) as mask:
            output = self.scale_file_(image, mask)
        output['id'] = self.images[idx]
        return output

    def scale_file_(self, image: "Image", mask: "Image") -> Dict[str, Tensor]:
        """Resize an open image/mask pair and convert both to tensors.

        Args:
            image (Image): open PIL image file.
            mask (Image): open PIL mask file (single channel expected).

        Returns:
            Dict[str, Tensor]: ``"image"`` and ``"mask"`` as normalized
            CHW float tensors.
        """
        width, height = image.size
        width, height = int(width * self.scale), int(height * self.scale)
        image = image.resize((width, height))
        if self.scale_mask:
            mask = mask.resize((width, height))
        image, mask = np.array(image), np.array(mask)
        # Mask is single-channel (HxW); add a channel axis so both
        # arrays are HWC before the transpose.
        mask = np.expand_dims(mask, axis=2)
        # HWC to CHW, the layout torch models expect.
        image = image.transpose((2, 0, 1))
        mask = mask.transpose((2, 0, 1))
        # Normalize 8-bit pixel values into [0, 1].
        image = image / 255
        mask = mask / 255
        # TODO: Convert mask to IntTensor? (all values are 0, 1)
        return {"image": torch.from_numpy(image).type(torch.FloatTensor),
                "mask": torch.from_numpy(mask).type(torch.FloatTensor)}