@@ -1,4 +1,7 @@
 # -*- coding: utf-8 -*-
+"""
+Evaluation module for segmentation tasks.
+"""
 from __future__ import absolute_import, print_function
 import csv
 import os
@@ -14,13 +17,18 @@
 from pymic.util.image_process import *
 from pymic.util.parse_config import parse_config
 
-# Dice evaluation
+
 def binary_dice(s, g, resize = False):
     """
-    calculate the Dice score of two N-d volumes.
-    s: the segmentation volume of numpy array
-    g: the ground truth volume of numpy array
-    resize: if s and g have different shapes, resize s to match g.
+    Calculate the Dice score of two N-d volumes for binary segmentation.
+
+    :param s: The segmentation volume of numpy array.
+    :param g: The ground truth volume of numpy array.
+    :param resize: (optional, bool)
+        If s and g have different shapes, resize s to match g.
+        Default is `False`.
+
+    :return: The Dice value.
     """
     assert(len(s.shape) == len(g.shape))
     if(resize):
@@ -39,30 +47,43 @@ def binary_dice(s, g, resize = False):
     return dice
 
 def dice_of_images(s_name, g_name):
+    """
+    Calculate the Dice score given the image names of binary segmentation
+    and ground truth, respectively.
+
+    :param s_name: (str) The filename of the segmentation result.
+    :param g_name: (str) The filename of the ground truth.
+
+    :return: The Dice value.
+    """
     s = load_image_as_nd_array(s_name)['data_array']
     g = load_image_as_nd_array(g_name)['data_array']
     dice = binary_dice(s, g)
     return dice
 
-# IOU evaluation
+
 def binary_iou(s, g):
+    """
+    Calculate the IoU score of two N-d volumes for binary segmentation.
+
+    :param s: The segmentation volume of numpy array.
+    :param g: The ground truth volume of numpy array.
+
+    :return: The IoU value.
+    """
     assert(len(s.shape) == len(g.shape))
     intersecion = np.multiply(s, g)
     union = np.asarray(s + g > 0, np.float32)
     iou = (intersecion.sum() + 1e-5)/(union.sum() + 1e-5)
     return iou
 
-def iou_of_images(s_name, g_name):
-    s = load_image_as_nd_array(s_name)['data_array']
-    g = load_image_as_nd_array(g_name)['data_array']
-    margin = (3, 8, 8)
-    g = get_detection_binary_bounding_box(g, margin)
-    return binary_iou(s, g)
-
 # Hausdorff and ASSD evaluation
 def get_edge_points(img):
     """
-    get edge points of a binary segmentation result
+    Get edge points of a binary segmentation result.
+
+    :param img: (numpy.array) A 2D or 3D array of binary segmentation.
+    :return: An edge map.
     """
     dim = len(img.shape)
     if(dim == 2):
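As a sanity check, the Dice and IoU metrics above can be exercised on toy arrays. A minimal sketch (the overlap counts are computed by hand; `binary_dice` is assumed to use the usual 2|S∩G|/(|S|+|G|) formula with the same 1e-5 smoothing as `binary_iou`, since its body is outside this diff):

    import numpy as np
    s = np.zeros((8, 8), np.uint8); s[2:6, 2:6] = 1   # 4x4 square, 16 voxels
    g = np.zeros((8, 8), np.uint8); g[3:7, 3:7] = 1   # shifted 4x4 square
    # overlap is 3x3 = 9 voxels, union is 16 + 16 - 9 = 23
    print(binary_dice(s, g))   # ~0.5625 = 2*9 / (16 + 16)
    print(binary_iou(s, g))    # ~0.3913 = 9 / 23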
@@ -76,11 +97,14 @@ def get_edge_points(img):
 
 def binary_hd95(s, g, spacing = None):
     """
-    get the hausdorff distance between a binary segmentation and the ground truth
-    inputs:
-        s: a 3D or 2D binary image for segmentation
-        g: a 2D or 2D binary image for ground truth
-        spacing: a list for image spacing, length should be 3 or 2
+    Get the 95th percentile of the Hausdorff distance between a binary
+    segmentation and the ground truth.
+
+    :param s: (numpy.array) A 2D or 3D binary image for segmentation.
+    :param g: (numpy.array) A 2D or 3D binary image for ground truth.
+    :param spacing: (list) A list for image spacing, length should be 2 or 3.
+
+    :return: The HD95 value.
     """
     s_edge = get_edge_points(s)
     g_edge = get_edge_points(g)
@@ -109,11 +133,14 @@ def binary_hd95(s, g, spacing = None):
 
 def binary_assd(s, g, spacing = None):
     """
-    get the average symetric surface distance between a binary segmentation and the ground truth
-    inputs:
-        s: a 3D or 2D binary image for segmentation
-        g: a 2D or 2D binary image for ground truth
-        spacing: a list for image spacing, length should be 3 or 2
+    Get the Average Symmetric Surface Distance (ASSD) between a binary
+    segmentation and the ground truth.
+
+    :param s: (numpy.array) A 2D or 3D binary image for segmentation.
+    :param g: (numpy.array) A 2D or 3D binary image for ground truth.
+    :param spacing: (list) A list for image spacing, length should be 2 or 3.
+
+    :return: The ASSD value.
     """
     s_edge = get_edge_points(s)
     g_edge = get_edge_points(g)
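For the surface-distance metrics, `spacing` gives the physical voxel size along each axis, so distances come out in physical units rather than voxels. A hedged usage sketch with hypothetical volumes (this assumes `get_edge_points` has scipy's `ndimage` available from the module imports, which sit outside this diff):

    s = np.zeros((32, 32, 32), np.uint8); s[8:20, 8:20, 8:20] = 1
    g = np.zeros((32, 32, 32), np.uint8); g[10:22, 10:22, 10:22] = 1
    spacing = [3.0, 1.0, 1.0]           # e.g. anisotropic CT with 3 mm slices
    print(binary_hd95(s, g, spacing))   # 95th percentile of surface distances
    print(binary_assd(s, g, spacing))   # average symmetric surface distance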
@@ -139,14 +166,34 @@ def binary_assd(s, g, spacing = None):
     return assd
 
 # relative volume error evaluation
-def binary_relative_volume_error(s_volume, g_volume):
-    s_v = float(s_volume.sum())
-    g_v = float(g_volume.sum())
+def binary_relative_volume_error(s, g):
+    """
+    Get the Relative Volume Error (RVE) between a binary segmentation
+    and the ground truth.
+
+    :param s: (numpy.array) A 2D or 3D binary image for segmentation.
+    :param g: (numpy.array) A 2D or 3D binary image for ground truth.
+
+    :return: The RVE value.
+    """
+    s_v = float(s.sum())
+    g_v = float(g.sum())
     assert(g_v > 0)
     rve = abs(s_v - g_v)/g_v
     return rve
 
 def get_binary_evaluation_score(s_volume, g_volume, spacing, metric):
+    """
+    Evaluate the performance of binary segmentation using a specified metric.
+    The metric options are {`dice`, `iou`, `assd`, `hd95`, `rve`, `volume`}.
+
+    :param s_volume: (numpy.array) A 2D or 3D binary image for segmentation.
+    :param g_volume: (numpy.array) A 2D or 3D binary image for ground truth.
+    :param spacing: (list) A list for image spacing, length should be 2 or 3.
+    :param metric: (str) The metric name.
+
+    :return: The metric value.
+    """
     if(len(s_volume.shape) == 4):
         assert(s_volume.shape[0] == 1 and g_volume.shape[0] == 1)
         s_volume = np.reshape(s_volume, s_volume.shape[1:])
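RVE only compares foreground volumes, so it is cheap but blind to location; a segmentation in the wrong place can still score zero. A quick self-contained sketch:

    # equal foreground volumes give zero RVE even though the masks are shifted
    s = np.zeros((8, 8), np.uint8); s[2:6, 2:6] = 1
    g = np.zeros((8, 8), np.uint8); g[3:7, 3:7] = 1
    print(binary_relative_volume_error(s, g))   # 0.0 (|S| = |G| = 16)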
@@ -158,19 +205,14 @@ def get_binary_evaluation_score(s_volume, g_volume, spacing, metric):
 
     if(metric_lower == "dice"):
         score = binary_dice(s_volume, g_volume)
-
     elif(metric_lower == "iou"):
         score = binary_iou(s_volume, g_volume)
-
     elif(metric_lower == 'assd'):
         score = binary_assd(s_volume, g_volume, spacing)
-
     elif(metric_lower == "hd95"):
         score = binary_hd95(s_volume, g_volume, spacing)
-
     elif(metric_lower == "rve"):
         score = binary_relative_volume_error(s_volume, g_volume)
-
     elif(metric_lower == "volume"):
         voxel_size = 1.0
         for dim in range(len(spacing)):
@@ -182,6 +224,21 @@ def get_binary_evaluation_score(s_volume, g_volume, spacing, metric):
     return score
 
 def get_multi_class_evaluation_score(s_volume, g_volume, label_list, fuse_label, spacing, metric):
+    """
+    Evaluate the segmentation performance using a specified metric for a list of labels.
+    The metric options are {`dice`, `iou`, `assd`, `hd95`, `rve`, `volume`}.
+    If `fuse_label` is `True`, the labels in `label_list` are merged as the foreground
+    and all other labels as the background, giving a binary segmentation result.
+
+    :param s_volume: (numpy.array) A 2D or 3D image for segmentation.
+    :param g_volume: (numpy.array) A 2D or 3D image for ground truth.
+    :param label_list: (list) A list of target labels.
+    :param fuse_label: (bool) Whether to fuse the labels in `label_list` or not.
+    :param spacing: (list) A list for image spacing, length should be 2 or 3.
+    :param metric: (str) The metric name.
+
+    :return: The list of metric values.
+    """
     if(fuse_label):
         s_volume_sub = np.zeros_like(s_volume)
         g_volume_sub = np.zeros_like(g_volume)
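With `fuse_label=False` each label in `label_list` is scored independently; with `fuse_label=True` they are pooled into a single foreground first. A hedged sketch with a hypothetical 2D label-map pair `seg`/`gt` containing labels 1 and 2:

    # per-label Dice: one value for label 1, one for label 2
    scores = get_multi_class_evaluation_score(seg, gt, label_list = [1, 2],
        fuse_label = False, spacing = [1.0, 1.0], metric = "dice")
    # a single Dice value with labels {1, 2} merged as the foreground
    fused = get_multi_class_evaluation_score(seg, gt, label_list = [1, 2],
        fuse_label = True, spacing = [1.0, 1.0], metric = "dice")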
@@ -198,8 +255,31 @@ def get_multi_class_evaluation_score(s_volume, g_volume, label_list, fuse_label,
         score_list.append(temp_score)
     return score_list
 
-def evaluation(config_file):
-    config = parse_config(config_file)['evaluation']
+def evaluation(config):
+    """
+    Run evaluation of segmentation results based on a configuration dictionary `config`.
+    The following fields should be provided in `config`:
+
+    :param metric: (str) The metric for evaluation.
+        The metric options are {`dice`, `iou`, `assd`, `hd95`, `rve`, `volume`}.
+    :param label_list: (list) The list of labels for evaluation.
+    :param label_fuse: (optional, bool) If True, fuse the labels in `label_list`
+        as the foreground, and the other labels as the background. Default is False.
+    :param organ_name: (str) The name of the organ for segmentation.
+    :param ground_truth_folder_root: (str) The root dir of ground truth images.
+    :param segmentation_folder_root: (str) The root dir of segmentation images.
+    :param evaluation_image_pair: (str) The csv file that provides the segmentation
+        images and the corresponding ground truth images.
+    :param ground_truth_label_convert_source: (optional, list) The list of source
+        labels for label conversion in the ground truth.
+    :param ground_truth_label_convert_target: (optional, list) The list of target
+        labels for label conversion in the ground truth.
+    :param segmentation_label_convert_source: (optional, list) The list of source
+        labels for label conversion in the segmentation.
+    :param segmentation_label_convert_target: (optional, list) The list of target
+        labels for label conversion in the segmentation.
+    """
+
     metric = config['metric']
     label_list = config['label_list']
     label_fuse = config.get('label_fuse', False)
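The fields documented in the new docstring map onto an `evaluation` section of the cfg file that `parse_config` reads. A minimal sketch (all paths and values are hypothetical):

    [evaluation]
    metric      = dice
    label_list  = [1]
    organ_name  = liver
    ground_truth_folder_root = ./data/ground_truth
    segmentation_folder_root = ./result/unet
    evaluation_image_pair    = ./config/eval_image_pairs.csv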
@@ -271,13 +351,25 @@ def evaluation(config_file):
     print("{0:} std ".format(metric), score_std)
 
 def main():
+    """
+    Main function for evaluation of segmentation results.
+    A configuration file is needed for running, e.g.,
+
+    .. code-block:: none
+
+        pymic_evaluate_seg config.cfg
+
+    The configuration file should have an `evaluation` section.
+    See :func:`pymic.util.evaluation_seg.evaluation` for details of the
+    required configuration.
+    """
     if(len(sys.argv) < 2):
         print('Number of arguments should be 2. e.g.')
         print('    pymic_evaluate_seg config.cfg')
         exit()
     config_file = str(sys.argv[1])
     assert(os.path.isfile(config_file))
-    evaluation(config_file)
+    config = parse_config(config_file)['evaluation']
+    evaluation(config)
 
 if __name__ == '__main__':
     main()
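Since `evaluation` now takes the parsed dictionary instead of a file path, callers other than `main` can reuse it directly. A sketch of the new calling convention:

    from pymic.util.parse_config import parse_config
    config = parse_config("config.cfg")['evaluation']
    evaluation(config)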