
Commit 450a848

update docs
1 parent 7ecdb0c commit 450a848

File tree

4 files changed: +228 -66 lines changed


pymic/util/evaluation_cls.py

Lines changed: 22 additions & 0 deletions
@@ -76,6 +76,8 @@ def binary_evaluation(config):
     It should have the following fields:
 
     :param metric_list: (list) A list of evaluation metrics.
+        The supported metrics are {`accuracy`, `recall`, `sensitivity`, `specificity`,
+        `precision`, `auc`}.
     :param ground_truth_csv: (str) The csv file for ground truth.
     :param predict_prob_csv: (str) The csv file for prediction probability.
     """
@@ -110,6 +112,8 @@ def nexcl_evaluation(config):
     It should have the following fields:
 
     :param metric_list: (list) A list of evaluation metrics.
+        The supported metrics are {`accuracy`, `recall`, `sensitivity`, `specificity`,
+        `precision`, `auc`}.
     :param ground_truth_csv: (str) The csv file for ground truth.
     :param predict_prob_csv: (str) The csv file for prediction probability.
     """
@@ -153,6 +157,24 @@ def nexcl_evaluation(config):
         csv_writer.writerow(item)
 
 def main():
+    """
+    Main function for evaluation of classification results.
+    A configuration file is needed for running, e.g.,
+
+    .. code-block:: none
+
+        pymic_evaluate_cls config.cfg
+
+    The configuration file should have an `evaluation` section with
+    the following fields:
+
+    :param task_type: (str) `cls` or `cls_nexcl`.
+    :param metric_list: (list) A list of evaluation metrics.
+        The supported metrics are {`accuracy`, `recall`, `sensitivity`, `specificity`,
+        `precision`, `auc`}.
+    :param ground_truth_csv: (str) The csv file for ground truth.
+    :param predict_prob_csv: (str) The csv file for prediction probability.
+    """
     if(len(sys.argv) < 2):
         print('Number of arguments should be 2. e.g.')
         print('    pymic_evaluate_cls config.cfg')
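Editor's note: for orientation, the fields documented above translate into a configuration dictionary roughly like the sketch below. The CSV paths are placeholders and the expected CSV layout is not shown in this diff, so treat it as an illustration rather than a ready-to-run script.

    from pymic.util.evaluation_cls import binary_evaluation

    # Hypothetical CSV paths; metric_list uses only metrics listed in the docstring.
    config = {
        'metric_list':      ['accuracy', 'auc'],
        'ground_truth_csv': './data/test_ground_truth.csv',
        'predict_prob_csv': './result/test_predict_prob.csv',
    }
    binary_evaluation(config)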

pymic/util/evaluation_seg.py

Lines changed: 127 additions & 35 deletions
@@ -1,4 +1,7 @@
 # -*- coding: utf-8 -*-
+"""
+Evaluation module for segmentation tasks.
+"""
 from __future__ import absolute_import, print_function
 import csv
 import os
@@ -14,13 +17,18 @@
 from pymic.util.image_process import *
 from pymic.util.parse_config import parse_config
 
-# Dice evaluation
+
 def binary_dice(s, g, resize = False):
     """
-    calculate the Dice score of two N-d volumes.
-    s: the segmentation volume of numpy array
-    g: the ground truth volume of numpy array
-    resize: if s and g have different shapes, resize s to match g.
+    Calculate the Dice score of two N-d volumes for binary segmentation.
+
+    :param s: The segmentation volume of numpy array.
+    :param g: The ground truth volume of numpy array.
+    :param resize: (optional, bool)
+        If s and g have different shapes, resize s to match g.
+        Default is `False`.
+
+    :return: The Dice value.
     """
     assert(len(s.shape)== len(g.shape))
     if(resize):
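Editor's note: as a reference for what the newly documented binary_dice computes, here is a standalone sketch of the Dice score with the same 1e-5 smoothing used elsewhere in this file; it illustrates the formula and is not the library implementation itself.

    import numpy as np

    def dice_sketch(s, g, eps=1e-5):
        # Dice = 2|S ∩ G| / (|S| + |G|), with a small epsilon for empty masks.
        s = np.asarray(s, np.float32)
        g = np.asarray(g, np.float32)
        inter = (s * g).sum()
        return (2.0 * inter + eps) / (s.sum() + g.sum() + eps)

    # Example: two overlapping 2D binary masks, each with 4 foreground pixels,
    # sharing 2 pixels -> Dice = 2*2 / (4 + 4) = 0.5.
    s = np.zeros((4, 4)); s[1:3, 1:3] = 1
    g = np.zeros((4, 4)); g[1:3, 2:4] = 1
    print(dice_sketch(s, g))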
@@ -39,30 +47,43 @@ def binary_dice(s, g, resize = False):
     return dice
 
 def dice_of_images(s_name, g_name):
+    """
+    Calculate the Dice score given the image names of binary segmentation
+    and ground truth, respectively.
+
+    :param s_name: (str) The filename of segmentation result.
+    :param g_name: (str) The filename of ground truth.
+
+    :return: The Dice value.
+    """
     s = load_image_as_nd_array(s_name)['data_array']
     g = load_image_as_nd_array(g_name)['data_array']
     dice = binary_dice(s, g)
     return dice
 
-# IOU evaluation
+
 def binary_iou(s,g):
+    """
+    Calculate the IoU score of two N-d volumes for binary segmentation.
+
+    :param s: The segmentation volume of numpy array.
+    :param g: The ground truth volume of numpy array.
+
+    :return: The IoU value.
+    """
     assert(len(s.shape)== len(g.shape))
     intersecion = np.multiply(s, g)
     union = np.asarray(s + g >0, np.float32)
     iou = (intersecion.sum() + 1e-5)/(union.sum() + 1e-5)
     return iou
 
-def iou_of_images(s_name, g_name):
-    s = load_image_as_nd_array(s_name)['data_array']
-    g = load_image_as_nd_array(g_name)['data_array']
-    margin = (3, 8, 8)
-    g = get_detection_binary_bounding_box(g, margin)
-    return binary_iou(s, g)
-
 # Hausdorff and ASSD evaluation
 def get_edge_points(img):
     """
-    get edge points of a binary segmentation result
+    Get edge points of a binary segmentation result.
+
+    :param img: (numpy.array) A 2D or 3D array of binary segmentation.
+    :return: An edge map.
     """
     dim = len(img.shape)
     if(dim == 2):
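Editor's note: similarly, the IoU (Jaccard) score documented for binary_iou can be sketched as follows; again this is an illustration, not the library code.

    import numpy as np

    def iou_sketch(s, g, eps=1e-5):
        # IoU = |S ∩ G| / |S ∪ G|, smoothed so empty masks do not divide by zero.
        s = np.asarray(s) > 0
        g = np.asarray(g) > 0
        inter = np.logical_and(s, g).sum()
        union = np.logical_or(s, g).sum()
        return (inter + eps) / (union + eps)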
@@ -76,11 +97,14 @@ def get_edge_points(img):
 
 def binary_hd95(s, g, spacing = None):
     """
-    get the hausdorff distance between a binary segmentation and the ground truth
-    inputs:
-        s: a 3D or 2D binary image for segmentation
-        g: a 2D or 2D binary image for ground truth
-        spacing: a list for image spacing, length should be 3 or 2
+    Get the 95th percentile of the Hausdorff distance between a binary segmentation
+    and the ground truth.
+
+    :param s: (numpy.array) A 2D or 3D binary image for segmentation.
+    :param g: (numpy.array) A 2D or 3D binary image for ground truth.
+    :param spacing: (list) A list for image spacing, length should be 2 or 3.
+
+    :return: The HD95 value.
     """
     s_edge = get_edge_points(s)
     g_edge = get_edge_points(g)
@@ -109,11 +133,14 @@ def binary_hd95(s, g, spacing = None):
 
 def binary_assd(s, g, spacing = None):
     """
-    get the average symetric surface distance between a binary segmentation and the ground truth
-    inputs:
-        s: a 3D or 2D binary image for segmentation
-        g: a 2D or 2D binary image for ground truth
-        spacing: a list for image spacing, length should be 3 or 2
+    Get the Average Symmetric Surface Distance (ASSD) between a binary segmentation
+    and the ground truth.
+
+    :param s: (numpy.array) A 2D or 3D binary image for segmentation.
+    :param g: (numpy.array) A 2D or 3D binary image for ground truth.
+    :param spacing: (list) A list for image spacing, length should be 2 or 3.
+
+    :return: The ASSD value.
     """
     s_edge = get_edge_points(s)
     g_edge = get_edge_points(g)
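Editor's note: to make the HD95 and ASSD definitions concrete, the sketch below builds both from scipy's Euclidean distance transform. It assumes s_edge and g_edge are binary edge maps such as those produced by get_edge_points, and it uses one common HD95 convention (the larger of the two directed 95th-percentile distances); the actual implementation in this file may differ in details.

    import numpy as np
    from scipy import ndimage

    def directed_distances(a_edge, b_edge, spacing):
        # distance_transform_edt assigns, to every non-zero voxel, the distance to
        # the nearest zero voxel; running it on the complement of b_edge therefore
        # gives each voxel's distance to the nearest edge point of b. We read off
        # those distances at the edge points of a.
        dist_to_b = ndimage.distance_transform_edt(1 - np.asarray(b_edge, np.uint8),
                                                   sampling=spacing)
        return dist_to_b[np.asarray(a_edge, bool)]

    def hd95_assd_sketch(s_edge, g_edge, spacing=None):
        if spacing is None:
            spacing = [1.0] * len(g_edge.shape)
        d_sg = directed_distances(s_edge, g_edge, spacing)   # s surface -> g surface
        d_gs = directed_distances(g_edge, s_edge, spacing)   # g surface -> s surface
        hd95 = max(np.percentile(d_sg, 95), np.percentile(d_gs, 95))
        assd = (d_sg.sum() + d_gs.sum()) / (len(d_sg) + len(d_gs))
        return hd95, assd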
@@ -139,14 +166,34 @@ def binary_assd(s, g, spacing = None):
     return assd
 
 # relative volume error evaluation
-def binary_relative_volume_error(s_volume, g_volume):
-    s_v = float(s_volume.sum())
-    g_v = float(g_volume.sum())
+def binary_relative_volume_error(s, g):
+    """
+    Get the Relative Volume Error (RVE) between a binary segmentation
+    and the ground truth.
+
+    :param s: (numpy.array) A 2D or 3D binary image for segmentation.
+    :param g: (numpy.array) A 2D or 3D binary image for ground truth.
+
+    :return: The RVE value.
+    """
+    s_v = float(s.sum())
+    g_v = float(g.sum())
     assert(g_v > 0)
     rve = abs(s_v - g_v)/g_v
     return rve
 
 def get_binary_evaluation_score(s_volume, g_volume, spacing, metric):
+    """
+    Evaluate the performance of binary segmentation using a specified metric.
+    The metric options are {`dice`, `iou`, `assd`, `hd95`, `rve`, `volume`}.
+
+    :param s_volume: (numpy.array) A 2D or 3D binary image for segmentation.
+    :param g_volume: (numpy.array) A 2D or 3D binary image for ground truth.
+    :param spacing: (list) A list for image spacing, length should be 2 or 3.
+    :param metric: (str) The metric name.
+
+    :return: The metric value.
+    """
     if(len(s_volume.shape) == 4):
         assert(s_volume.shape[0] == 1 and g_volume.shape[0] == 1)
         s_volume = np.reshape(s_volume, s_volume.shape[1:])
@@ -158,19 +205,14 @@ def get_binary_evaluation_score(s_volume, g_volume, spacing, metric):
 
     if(metric_lower == "dice"):
         score = binary_dice(s_volume, g_volume)
-
     elif(metric_lower == "iou"):
         score = binary_iou(s_volume,g_volume)
-
     elif(metric_lower == 'assd'):
         score = binary_assd(s_volume, g_volume, spacing)
-
     elif(metric_lower == "hd95"):
         score = binary_hd95(s_volume, g_volume, spacing)
-
     elif(metric_lower == "rve"):
         score = binary_relative_volume_error(s_volume, g_volume)
-
     elif(metric_lower == "volume"):
         voxel_size = 1.0
         for dim in range(len(spacing)):
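Editor's note: a small usage sketch of get_binary_evaluation_score on synthetic data, looping over the metric names documented above; the toy masks, and the assumption that spacing follows the array axis order, are made up for illustration.

    import numpy as np
    from pymic.util.evaluation_seg import get_binary_evaluation_score

    # Two toy 3D binary masks; spacing assumed to be [3.0, 1.0, 1.0] mm along (D, H, W).
    seg = np.zeros((8, 16, 16), np.uint8); seg[2:6, 4:12, 4:12] = 1
    gt  = np.zeros((8, 16, 16), np.uint8); gt[2:6, 5:13, 5:13]  = 1
    spacing = [3.0, 1.0, 1.0]

    for metric in ["dice", "iou", "assd", "hd95", "rve", "volume"]:
        print(metric, get_binary_evaluation_score(seg, gt, spacing, metric))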
@@ -182,6 +224,21 @@ def get_binary_evaluation_score(s_volume, g_volume, spacing, metric):
     return score
 
 def get_multi_class_evaluation_score(s_volume, g_volume, label_list, fuse_label, spacing, metric):
+    """
+    Evaluate the segmentation performance using a specified metric for a list of labels.
+    The metric options are {`dice`, `iou`, `assd`, `hd95`, `rve`, `volume`}.
+    If `fuse_label` is `True`, the labels in `label_list` will be merged as foreground
+    and the other labels merged as background, giving a binary segmentation result.
+
+    :param s_volume: (numpy.array) A 2D or 3D image for segmentation.
+    :param g_volume: (numpy.array) A 2D or 3D image for ground truth.
+    :param label_list: (list) A list of target labels.
+    :param fuse_label: (bool) Fuse the labels in `label_list` or not.
+    :param spacing: (list) A list for image spacing, length should be 2 or 3.
+    :param metric: (str) The metric name.
+
+    :return: The metric value list.
+    """
     if(fuse_label):
         s_volume_sub = np.zeros_like(s_volume)
         g_volume_sub = np.zeros_like(g_volume)
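Editor's note: an illustrative call pattern for get_multi_class_evaluation_score, showing the effect of fuse_label on a made-up three-class label map.

    import numpy as np
    from pymic.util.evaluation_seg import get_multi_class_evaluation_score

    seg = np.random.randint(0, 3, size=(8, 32, 32))   # made-up prediction with labels {0, 1, 2}
    gt  = np.random.randint(0, 3, size=(8, 32, 32))   # made-up ground truth
    spacing = [1.0, 1.0, 1.0]

    # One Dice value per label in label_list.
    per_label_dice = get_multi_class_evaluation_score(seg, gt, [1, 2], False, spacing, "dice")

    # Labels 1 and 2 merged into a single foreground before a binary Dice is computed.
    fused_dice = get_multi_class_evaluation_score(seg, gt, [1, 2], True, spacing, "dice")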
@@ -198,8 +255,31 @@ def get_multi_class_evaluation_score(s_volume, g_volume, label_list, fuse_label,
         score_list.append(temp_score)
     return score_list
 
-def evaluation(config_file):
-    config = parse_config(config_file)['evaluation']
+def evaluation(config):
+    """
+    Run evaluation of segmentation results based on a configuration dictionary `config`.
+    The following fields should be provided in `config`:
+
+    :param metric: (str) The metric for evaluation.
+        The metric options are {`dice`, `iou`, `assd`, `hd95`, `rve`, `volume`}.
+    :param label_list: (list) The list of labels for evaluation.
+    :param label_fuse: (optional, bool) If True, fuse the labels in the `label_list`
+        as the foreground, and the other labels as the background. Default is False.
+    :param organ_name: (str) The name of the organ for segmentation.
+    :param ground_truth_folder_root: (str) The root dir of ground truth images.
+    :param segmentation_folder_root: (str) The root dir of segmentation images.
+    :param evaluation_image_pair: (str) The csv file that provides the segmentation
+        images and the corresponding ground truth images.
+    :param ground_truth_label_convert_source: (optional, list) The list of source
+        labels for label conversion in the ground truth.
+    :param ground_truth_label_convert_target: (optional, list) The list of target
+        labels for label conversion in the ground truth.
+    :param segmentation_label_convert_source: (optional, list) The list of source
+        labels for label conversion in the segmentation.
+    :param segmentation_label_convert_target: (optional, list) The list of target
+        labels for label conversion in the segmentation.
+    """
+
     metric = config['metric']
     label_list = config['label_list']
     label_fuse = config.get('label_fuse', False)
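Editor's note: pieced together from the fields listed above, a minimal configuration dictionary could look like the sketch below; the paths, organ name, and label values are placeholders, and the optional label-conversion fields are simply omitted.

    from pymic.util.evaluation_seg import evaluation

    config = {
        'metric':                   'dice',
        'label_list':               [1, 2],
        'label_fuse':               False,
        'organ_name':               'tumor',
        'ground_truth_folder_root': './data/ground_truth',
        'segmentation_folder_root': './result/segmentation',
        'evaluation_image_pair':    './config/image_pairs.csv',
    }
    evaluation(config)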
@@ -271,13 +351,25 @@ def evaluation(config_file):
     print("{0:} std ".format(metric), score_std)
 
 def main():
+    """
+    Main function for evaluation of segmentation results.
+    A configuration file is needed for running, e.g.,
+
+    .. code-block:: none
+
+        pymic_evaluate_seg config.cfg
+
+    The configuration file should have an `evaluation` section.
+    See :func:`pymic.util.evaluation_seg.evaluation` for details of the required fields.
+    """
     if(len(sys.argv) < 2):
         print('Number of arguments should be 2. e.g.')
         print('    pymic_evaluate_seg config.cfg')
         exit()
     config_file = str(sys.argv[1])
     assert(os.path.isfile(config_file))
-    evaluation(config_file)
+    config = parse_config(config_file)['evaluation']
+    evaluation(config)
 
 if __name__ == '__main__':
     main()

pymic/util/general.py

Lines changed: 9 additions & 4 deletions
@@ -4,14 +4,19 @@
 import numpy as np
 
 def keyword_match(a,b):
+    """
+    Test if two strings are the same when converted to lower case.
+    """
     return a.lower() == b.lower()
 
 def get_one_hot_seg(label, class_num):
     """
-    convert a segmentation label to one-hot
-    label: a tensor with a shape of [N, 1, D, H, W] or [N, 1, H, W]
-    class_num: class number.
-    output: an one-hot tensor with a shape of [N, C, D, H, W] or [N, C, H, W]
+    Convert a segmentation label to one-hot.
+
+    :param label: A tensor with a shape of [N, 1, D, H, W] or [N, 1, H, W].
+    :param class_num: Class number.
+
+    :return: A one-hot tensor with a shape of [N, C, D, H, W] or [N, C, H, W].
     """
     size = list(label.size())
     if(size[1] != 1):
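Editor's note: for orientation, a small usage sketch of get_one_hot_seg with a made-up PyTorch label tensor of shape [N, 1, H, W].

    import torch
    from pymic.util.general import get_one_hot_seg

    # A batch of two 4x4 label maps with classes {0, 1, 2}, shaped [N, 1, H, W].
    label = torch.randint(0, 3, (2, 1, 4, 4))
    one_hot = get_one_hot_seg(label, 3)
    print(one_hot.shape)   # expected: torch.Size([2, 3, 4, 4])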
