ISPRS-Potsdam


The Potsdam dataset is for urban semantic segmentation used in the 2D Semantic Labeling Contest - Potsdam.

The dataset can be requested at the challenge homepage. You will get a file named 'Potsdam.zip'; unzip it to get a folder named 'Potsdam' that contains 10 files:

Potsdam
├── 1_DSM.rar
├── 1_DSM_normalisation.zip
├── 2_Ortho_RGB.zip <--
├── 3_Ortho_IRRG.zip
├── 4_Ortho_RGBIR.zip
├── 5_Labels_all.zip
├── 5_Labels_all_noBoundary.zip <--
├── 5_Labels_for_participants.zip
├── 5_Labels_for_participants_no_Boundary.zip
├── assess_classification_reference_implementation.tgz

Of these, only 2_Ortho_RGB.zip and 5_Labels_all_noBoundary.zip are required:

Potsdam
├── 2_Ortho_RGB.zip <--
├── 5_Labels_all_noBoundary.zip <--

For the Potsdam dataset, run the following command to re-organize it into the format expected by mmsegmentation:

(openmmlab) ...\mmsegmentation>python tools/dataset_converters/potsdam.py "D:/Dataset/Potsdam"
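The script also accepts the optional flags defined in its parse_args function (reproduced below): -o/--out_dir for the output path, --clip_size and --stride_size for the patch geometry, and --tmp_dir for where the zips are temporarily extracted. For example, spelling out the defaults explicitly:

(openmmlab) ...\mmsegmentation>python tools/dataset_converters/potsdam.py "D:/Dataset/Potsdam" -o data/potsdam --clip_size 512 --stride_size 256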

You will then get the following folder structure:

mmsegmentation
├── mmseg
├── tools
├── configs
├── data
│ ├── potsdam
│ │ ├── img_dir
│ │ │ ├── train: 3456
│ │ │ ├── val: 2016
│ │ ├── ann_dir
│ │ │ ├── train: 3456
│ │ │ ├── val: 2016

With the default settings, mmsegmentation generates 3456 patches for training and 2016 patches for validation.
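With this layout in place, a dataset config can point mmsegmentation at the img_dir/ann_dir folders. The snippet below is only a minimal sketch in the 0.x-style config format (pipelines and dataloader settings omitted); check configs/_base_/datasets/potsdam.py in your installed version, since field names differ across mmsegmentation releases:

# Minimal sketch of an mmsegmentation 0.x-style dataset config for Potsdam.
# Pipelines are omitted; field names may differ in 1.x (data_prefix etc.).
dataset_type = 'PotsdamDataset'
data_root = 'data/potsdam'
data = dict(
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/train',
        ann_dir='ann_dir/train'),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='img_dir/val',
        ann_dir='ann_dir/val'))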

The 2_Ortho_RGB.zip archive contains 38 images of size 6000x6000:

[Figure: the 38 Potsdam tiles]

The 38 images are divided into two subsets as follows:

splits = {
    'train': [
        '2_10', '2_11', '2_12', '3_10', '3_11', '3_12', '4_10', '4_11',
        '4_12', '5_10', '5_11', '5_12', '6_10', '6_11', '6_12', '6_7',
        '6_8', '6_9', '7_10', '7_11', '7_12', '7_7', '7_8', '7_9'
    ],
    'val': [
        '5_15', '6_15', '6_13', '3_13', '4_14', '6_14', '5_14', '2_13',
        '4_15', '2_14', '5_13', '4_13', '3_14', '7_13'
    ]
}

Every image is split into 12x12 = 144 patches of size 512x512, giving 38x144 = 5472 patches in total: 24x144 = 3456 training patches and 14x144 = 2016 validation patches.
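These counts can be sanity-checked directly from the splits dictionary above; a small sketch:

# Patch grid per 6000x6000 tile: the converter effectively emits one
# 512x512 patch per 512-pixel step (11 full steps per axis), plus one
# edge-aligned row/column, i.e. a 12x12 grid (see clip_big_image below).
patches_per_tile = 12 * 12                 # 144
num_train_tiles, num_val_tiles = 24, 14    # len(splits['train']), len(splits['val'])

print(num_train_tiles * patches_per_tile)                     # 3456
print(num_val_tiles * patches_per_tile)                       # 2016
print((num_train_tiles + num_val_tiles) * patches_per_tile)   # 5472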

Masks are single-channel images; each pixel value is the index of the corresponding color in the script's color_map (see clip_big_image below), so the values map to categories as follows:

0: boundary (black, the eroded regions ignored during evaluation)
1: impervious surfaces (white)
2: clutter/background (red)
3: car (yellow)
4: tree (green)
5: low vegetation (cyan)
6: building (blue)
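You can verify this mapping by inspecting any converted mask; the file name below is just one hypothetical example of the f'{idx_i}_{idx_j}_{start_x}_{start_y}_{end_x}_{end_y}.png' pattern used by the script:

import numpy as np
from PIL import Image

# Hypothetical example patch: any file in ann_dir/train works.
mask = np.array(Image.open('data/potsdam/ann_dir/train/2_10_0_0_512_512.png'))
print(mask.shape)                            # (512, 512), single channel
print(np.unique(mask, return_counts=True))   # values drawn from 0..6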

If you use 5_Labels_all.zip as your ground truth instead:

Potsdam
├── 2_Ortho_RGB.zip <--
├── 5_Labels_all.zip <--

then value 0 (black boundary pixels) does not occur, and the remaining values map to the same categories:

1: impervious surfaces (white)
2: clutter/background (red)
3: car (yellow)
4: tree (green)
5: low vegetation (cyan)
6: building (blue)
For reference, here is the full conversion script tools/dataset_converters/potsdam.py with explanatory comments:

import argparse
import glob
import math
import os
import os.path as osp
import tempfile
import zipfile

import numpy as np
from PIL import Image
from tqdm import tqdm


def parse_args(arg_list=None):
    # With the default arg_list=None, arguments are read from the command
    # line; passing a list instead is convenient when debugging in an IDE,
    # e.g. parse_args(['D:/Dataset/Potsdam']).
    parser = argparse.ArgumentParser(
        description='Convert potsdam dataset to mmsegmentation format')
    parser.add_argument('dataset_path', help='potsdam folder path')
    parser.add_argument('--tmp_dir', help='path of the temporary directory')
    parser.add_argument('-o', '--out_dir', help='output path')
    parser.add_argument(
        '--clip_size',
        type=int,
        help='clipped size of image after preparation',
        default=512)
    parser.add_argument(
        '--stride_size',
        type=int,
        help='stride of clipping original images',
        default=256)
    args = parser.parse_args(arg_list)
    return args


def clip_big_image(image_path, clip_save_dir, args, to_label=False):
    # The original images of the Potsdam dataset are very large, so they
    # are pre-processed into patches. Given a fixed clip size and stride
    # size, the patch grid over the width and height is determined. For
    # example, for one 5120x5120 original image with clip size 512 and
    # stride size 256, it would generate 20x20 = 400 patches of size
    # 512x512.
    image = Image.open(image_path)
    image = np.array(image)

    h, w, c = image.shape
    clip_size = args.clip_size
    stride_size = args.stride_size

    num_rows = math.ceil((h - clip_size) / stride_size) if math.ceil(
        (h - clip_size) /
        stride_size) * stride_size + clip_size >= h else math.ceil(
            (h - clip_size) / stride_size) + 1
    num_cols = math.ceil((w - clip_size) / stride_size) if math.ceil(
        (w - clip_size) /
        stride_size) * stride_size + clip_size >= w else math.ceil(
            (w - clip_size) / stride_size) + 1

    x, y = np.meshgrid(np.arange(num_cols + 1), np.arange(num_rows + 1))
    xmin = x * clip_size
    ymin = y * clip_size

    xmin = xmin.ravel()
    ymin = ymin.ravel()
    # Shift boxes that would stick out over the right/bottom edge back
    # inside the image, so every patch is exactly clip_size x clip_size.
    xmin_offset = np.where(xmin + clip_size > w, w - xmin - clip_size,
                           np.zeros_like(xmin))
    ymin_offset = np.where(ymin + clip_size > h, h - ymin - clip_size,
                           np.zeros_like(ymin))
    boxes = np.stack([
        xmin + xmin_offset, ymin + ymin_offset,
        np.minimum(xmin + clip_size, w),
        np.minimum(ymin + clip_size, h)
    ],
                     axis=1)

    if to_label:
        # Convert the RGB color mask to a single-channel index mask: each
        # color is hashed to a unique scalar by a dot product with
        # [2, 3, 4], and the row index of the matching color_map entry
        # becomes the mask value.
        color_map = np.array([[0, 0, 0], [255, 255, 255], [255, 0, 0],
                              [255, 255, 0], [0, 255, 0], [0, 255, 255],
                              [0, 0, 255]])
        flatten_v = np.matmul(
            image.reshape(-1, c),
            np.array([2, 3, 4]).reshape(3, 1))
        out = np.zeros_like(flatten_v)
        for idx, class_color in enumerate(color_map):
            value_idx = np.matmul(class_color,
                                  np.array([2, 3, 4]).reshape(3, 1))
            out[flatten_v == value_idx] = idx
        image = out.reshape(h, w)

    for box in boxes:
        start_x, start_y, end_x, end_y = box
        clipped_image = image[start_y:end_y,
                              start_x:end_x] if to_label else image[
                                  start_y:end_y, start_x:end_x, :]
        idx_i, idx_j = osp.basename(image_path).split('_')[2:4]
        clipped_image = Image.fromarray(clipped_image.astype(np.uint8))
        clipped_image.save(
            fp=osp.join(
                clip_save_dir,
                f'{idx_i}_{idx_j}_{start_x}_{start_y}_{end_x}_{end_y}.png'),
            format='PNG',
            compress_level=1)
        # clip_save_dir is e.g. 'data\\potsdam\\img_dir\\train'


def main():
    args = parse_args()
    splits = {
        'train': [
            '2_10', '2_11', '2_12', '3_10', '3_11', '3_12', '4_10', '4_11',
            '4_12', '5_10', '5_11', '5_12', '6_10', '6_11', '6_12', '6_7',
            '6_8', '6_9', '7_10', '7_11', '7_12', '7_7', '7_8', '7_9'
        ],
        'val': [
            '5_15', '6_15', '6_13', '3_13', '4_14', '6_14', '5_14', '2_13',
            '4_15', '2_14', '5_13', '4_13', '3_14', '7_13'
        ]
    }

    dataset_path = args.dataset_path
    if args.out_dir is None:
        out_dir = osp.join('data', 'potsdam')  # 'data\\potsdam'
    else:
        out_dir = args.out_dir

    print('Making directories...')
    os.makedirs(osp.join(out_dir, 'img_dir', 'train'), exist_ok=True)
    os.makedirs(osp.join(out_dir, 'img_dir', 'val'), exist_ok=True)
    os.makedirs(osp.join(out_dir, 'ann_dir', 'train'), exist_ok=True)
    os.makedirs(osp.join(out_dir, 'ann_dir', 'val'), exist_ok=True)

    zipp_list = glob.glob(os.path.join(dataset_path, '*.zip'))
    print('Find the data', zipp_list)
    # e.g. ['D:/Dataset/Potsdam\\2_Ortho_RGB.zip',
    #       'D:/Dataset/Potsdam\\5_Labels_all_noBoundary.zip']

    for zipp in zipp_list:
        # A fresh temporary directory is created in every iteration and
        # removed automatically when the with-block exits.
        with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
            zip_file = zipfile.ZipFile(zipp)
            zip_file.extractall(tmp_dir)
            # Check whether the *.tif files were unzipped into tmp_dir
            # itself or into a sub-directory.
            src_path_list = glob.glob(os.path.join(tmp_dir, '*.tif'))
            if not len(src_path_list):
                # The *.tif files were extracted into a sub-directory.
                sub_tmp_dir = os.path.join(tmp_dir, os.listdir(tmp_dir)[0])
                src_path_list = glob.glob(os.path.join(sub_tmp_dir, '*.tif'))

            prog_bar = tqdm(src_path_list)
            for src_path in prog_bar:
                # e.g. 'top_potsdam_2_10_RGB.tif'.split('_')[2:4]
                idx_i, idx_j = osp.basename(src_path).split('_')[2:4]
                data_type = 'train' if f'{idx_i}_{idx_j}' in splits[
                    'train'] else 'val'
                if 'label' in src_path:
                    dst_dir = osp.join(out_dir, 'ann_dir', data_type)
                    clip_big_image(src_path, dst_dir, args, to_label=True)
                else:
                    dst_dir = osp.join(out_dir, 'img_dir', data_type)
                    clip_big_image(src_path, dst_dir, args, to_label=False)

    print('Removing the temporary files...')
    print('Done!')


if __name__ == '__main__':
    main()
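After the script finishes, a quick sketch to confirm the expected patch counts (assuming the default output directory data/potsdam):

import glob
import os.path as osp

# Expect 3456 / 3456 / 2016 / 2016 files with the default settings.
for sub in ('img_dir/train', 'ann_dir/train', 'img_dir/val', 'ann_dir/val'):
    n = len(glob.glob(osp.join('data/potsdam', sub, '*.png')))
    print(sub, n)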

Synapse

Synapse dataset

Multi-Atlas Labeling Beyond the Cranial Vault - Workshop and Challenge

Note: You need to join the challenge first; only then will you see Abdomen and Cervix in the Files directory, which are private and invisible to non-participants.
Downloading RawData.zip (1.531 GB) alone is enough.