
Commit fbf9508
Commit message: "updated"
1 parent f2d6425
32 files changed: +947 / -283 lines

superannotate/input_converters/conversion.py

Lines changed: 56 additions & 68 deletions
@@ -10,61 +10,64 @@
 from .sa_conversion import sa_convert_platform, sa_convert_project_type
 from ..exceptions import SABaseException
 
-AVAILABLE_ANNOTATION_FORMATS = [
-    "COCO", "VOC", "LabelBox", "DataLoop", 'Supervisely', 'VoTT', 'SageMaker',
-    'VGG', 'GoogleCloud'
-]
-
 AVAILABLE_PLATFORMS = ["Desktop", "Web"]
 
 ALLOWED_TASK_TYPES = [
     'panoptic_segmentation', 'instance_segmentation', 'keypoint_detection',
-    'object_detection', 'vector_annotation', 'pixel_annotation'
+    'object_detection', 'vector_annotation'
 ]
 
 ALLOWED_PROJECT_TYPES = ['Pixel', 'Vector']
 
-ALLOWED_CONVERSIONS_SA_TO_COCO = [
-    ('Pixel', 'panoptic_segmentation'), ('Pixel', 'instance_segmentation'),
-    ('Vector', 'instance_segmentation'), ('Vector', 'keypoint_detection'),
-    ('Vector', 'object_detection')
-]
-
-ALLOWED_CONVERSIONS_COCO_TO_SA = [
-    ('Pixel', 'panoptic_segmentation'), ('Pixel', 'instance_segmentation'),
-    ('Vector', 'keypoint_detection'), ('Vector', 'instance_segmentation')
-]
-
-ALLOWED_CONVERSIONS_VOC_TO_SA = [
-    ('Vector', 'object_detection'), ('Vector', 'instance_segmentation'),
-    ('Pixel', 'instance_segmentation')
-]
-
-ALLOWED_CONVERSIONS_LABELBOX_TO_SA = [
-    ('Vector', 'object_detection'), ('Vector', 'instance_segmentation'),
-    ('Vector', 'vector_annotation')
-]
-
-ALLOWED_CONVERSIONS_DATALOOP_TO_SA = [
-    ('Vector', 'object_detection'), ('Vector', 'instance_segmentation'),
-    ('Vector', 'vector_annotation')
-]
-
-ALLOWED_CONVERSIONS_SUPERVISELY_TO_SA = [('Vector', 'vector_annotation')]
-
-ALLOWED_CONVERSIONS_VOTT_TO_SA = [
-    ('Vector', 'object_detection'), ('Vector', 'instance_segmentation'),
-    ('Vector', 'vector_annotation')
-]
-
-# ALLOWED_CONVERSIONS_SAGEMAKER_TO_SA = [('Vector', 'object_detection')]
-
-ALLOWED_CONVERSIONS_VGG_TO_SA = [
-    ('Vector', 'object_detection'), ('Vector', 'instance_segmentation'),
-    ('Vector', 'vector_annotation')
-]
-
-ALLOWED_CONVERSIONS_GOOGLECLOUD_TO_SA = [('Vector', 'object_detection')]
+ALLOWED_ANNOTATION_IMPORT_FORMATS = {
+    'COCO':
+        [
+            ('Pixel', 'panoptic_segmentation'),
+            ('Pixel', 'instance_segmentation'),
+            ('Vector', 'keypoint_detection'),
+            ('Vector', 'instance_segmentation')
+        ],
+    'VOC':
+        [
+            ('Vector', 'object_detection'), ('Vector', 'instance_segmentation'),
+            ('Pixel', 'instance_segmentation')
+        ],
+    'LabelBox':
+        [
+            ('Vector', 'object_detection'), ('Vector', 'instance_segmentation'),
+            ('Vector', 'vector_annotation')
+        ],
+    'DataLoop':
+        [
+            ('Vector', 'object_detection'), ('Vector', 'instance_segmentation'),
+            ('Vector', 'vector_annotation')
+        ],
+    'Supervisely': [('Vector', 'vector_annotation')],
+    'VoTT':
+        [
+            ('Vector', 'object_detection'), ('Vector', 'instance_segmentation'),
+            ('Vector', 'vector_annotation')
+        ],
+    'SageMaker':
+        [('Pixel', 'instance_segmentation'), ('Vector', 'object_detection')],
+    'VGG':
+        [
+            ('Vector', 'object_detection'), ('Vector', 'instance_segmentation'),
+            ('Vector', 'vector_annotation')
+        ],
+    'GoogleCloud': [('Vector', 'object_detection')],
+    'YOLO': [('Vector', 'object_detection')]
+}
+
+ALLOWED_ANNOTATION_EXPORT_FORMATS = {
+    'COCO':
+        [
+            ('Pixel', 'panoptic_segmentation'),
+            ('Pixel', 'instance_segmentation'),
+            ('Vector', 'instance_segmentation'),
+            ('Vector', 'keypoint_detection'), ('Vector', 'object_detection')
+        ]
+}
 
 
 def _passes_sanity_checks(args):
@@ -80,9 +83,9 @@ def _passes_sanity_checks(args):
         )
         raise SABaseException(0, log_msg)
 
-    if args.dataset_format not in AVAILABLE_ANNOTATION_FORMATS:
+    if args.dataset_format not in ALLOWED_ANNOTATION_IMPORT_FORMATS.keys():
        log_msg = "'%s' converter doesn't exist. Possible candidates are '%s'"\
-            % (args.dataset_format, AVAILABLE_ANNOTATION_FORMATS)
+            % (args.dataset_format, ALLOWED_ANNOTATION_IMPORT_FORMATS.keys())
        raise SABaseException(0, log_msg)
 
     if not isinstance(args.dataset_name, str):
@@ -114,27 +117,12 @@ def _passes_sanity_checks(args):
 def _passes_converter_sanity(args, direction):
     converter_values = (args.project_type, args.task)
     if direction == 'import':
-        if args.dataset_format == "COCO" and converter_values in ALLOWED_CONVERSIONS_COCO_TO_SA:
-            return True
-        elif args.dataset_format == "VOC" and converter_values in ALLOWED_CONVERSIONS_VOC_TO_SA:
-            return True
-        elif args.dataset_format == "LabelBox" and \
-            converter_values in ALLOWED_CONVERSIONS_LABELBOX_TO_SA:
-            return True
-        elif args.dataset_format == "DataLoop" and converter_values in ALLOWED_CONVERSIONS_DATALOOP_TO_SA:
-            return True
-        elif args.dataset_format == "Supervisely" and converter_values in ALLOWED_CONVERSIONS_SUPERVISELY_TO_SA:
-            return True
-        elif args.dataset_format == 'VoTT' and converter_values in ALLOWED_CONVERSIONS_VOTT_TO_SA:
-            return True
-        # elif args.dataset_format == 'SageMaker' and converter_values in ALLOWED_CONVERSIONS_SAGEMAKER_TO_SA:
-        #     return True
-        elif args.dataset_format == 'VGG' and converter_values in ALLOWED_CONVERSIONS_VGG_TO_SA:
-            return True
-        elif args.dataset_format == 'GoogleCloud' and converter_values in ALLOWED_CONVERSIONS_GOOGLECLOUD_TO_SA:
+        if converter_values in ALLOWED_ANNOTATION_IMPORT_FORMATS[
+            args.dataset_format]:
             return True
     else:
-        if args.dataset_format == "COCO" and converter_values in ALLOWED_CONVERSIONS_SA_TO_COCO:
+        if converter_values in ALLOWED_ANNOTATION_EXPORT_FORMATS[
+            args.dataset_format]:
             return True
 
     log_msg = "Please enter valid converter values. You can check available \
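The two new dictionaries replace the per-format allow-list constants and the long elif chain in _passes_converter_sanity with a single dict lookup plus a tuple membership test. A minimal standalone sketch of that lookup; the SimpleNamespace args object and the trimmed allow-list below are illustrative stand-ins, not the package's CLI:

from types import SimpleNamespace

# Trimmed, illustrative allow-list in the shape of ALLOWED_ANNOTATION_IMPORT_FORMATS.
IMPORT_FORMATS = {
    'COCO': [('Pixel', 'panoptic_segmentation'), ('Vector', 'keypoint_detection')],
    'GoogleCloud': [('Vector', 'object_detection')],
}


def passes_converter_sanity(args, allowed):
    # One membership test replaces the old per-format elif chain.
    return (args.project_type, args.task) in allowed.get(args.dataset_format, [])


args = SimpleNamespace(
    dataset_format='COCO', project_type='Vector', task='keypoint_detection'
)
print(passes_converter_sanity(args, IMPORT_FORMATS))  # True
args_bad = SimpleNamespace(
    dataset_format='COCO', project_type='Pixel', task='object_detection'
)
print(passes_converter_sanity(args_bad, IMPORT_FORMATS))  # False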

superannotate/input_converters/converters/coco_converters/coco_converter.py

Lines changed: 49 additions & 4 deletions
@@ -11,6 +11,7 @@
 
 import tqdm
 import time
+from pathlib import Path
 
 
 class CoCoConverter(object):
@@ -21,6 +22,7 @@ def __init__(self, args):
         self.output_dir = args.output_dir
         self.task = args.task
         self.direction = args.direction
+        self.platform = args.platform
 
         self.failed_conversion_cnt = 0
 
@@ -187,10 +189,8 @@ def _create_sa_classes(self, json_path):
                 'attribute_groups': []
             }
             classes.append(classes_dict)
-        with open(
-            os.path.join(self.output_dir, "classes", "classes.json"), "w"
-        ) as fp:
-            json.dump(classes, fp)
+
+        return classes
 
     def _generate_colors(self, number):
         colors = []
@@ -199,3 +199,48 @@ def _generate_colors(self, number):
             hexcolor = "#%02x%02x%02x" % tuple(color)
             colors.append(hexcolor)
         return colors
+
+    def save_desktop_format(self, classes, files_dict):
+        path = Path(self.output_dir)
+        cat_id_map = {}
+        new_classes = []
+        for idx, class_ in enumerate(classes):
+            cat_id_map[class_['id']] = idx + 2
+            class_['id'] = idx + 2
+            new_classes.append(class_)
+        with open(path.joinpath('classes.json'), 'w') as fw:
+            json.dump(new_classes, fw)
+
+        meta = {
+            "type": "meta",
+            "name": "lastAction",
+            "timestamp": int(round(time.time() * 1000))
+        }
+        new_json = {}
+        for file_name, json_data in files_dict.items():
+            file_name = file_name.replace('___objects.json', '')
+            new_json_data = []
+            for js_data in json_data:
+                if 'classId' in js_data:
+                    new_js_data = js_data.copy()
+                    new_js_data['classId'] = cat_id_map[js_data['classId']]
+                    new_json_data.append(new_js_data)
+            new_json_data.append(meta)
+            new_json[file_name] = new_json_data
+        with open(path.joinpath('annotations.json'), 'w') as fw:
+            json.dump(new_json, fw)
+
+    def save_web_format(self, classes, files_dict):
+        path = Path(self.output_dir)
+        for key, value in files_dict.items():
+            with open(path.joinpath(key), 'w') as fw:
+                json.dump(value, fw, indent=2)
+
+        with open(path.joinpath('classes', 'classes.json'), 'w') as fw:
+            json.dump(classes, fw)
+
+    def dump_output(self, classes, files_dict):
+        if self.platform == 'Web':
+            self.save_web_format(classes, files_dict)
+        else:
+            self.save_desktop_format(classes, files_dict)
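For the Desktop export, save_desktop_format remaps class ids to consecutive values starting at 2, rewrites each annotation's classId accordingly, and appends a "lastAction" meta record before writing a single annotations.json. A rough standalone sketch of that remapping on toy data; the class names and ids below are invented for illustration:

import json
import time

classes = [{'id': 7, 'name': 'person'}, {'id': 9, 'name': 'car'}]
files_dict = {'img1.jpg___objects.json': [{'classId': 9, 'points': []}]}

# Map original class ids to consecutive ids starting at 2, as in save_desktop_format.
cat_id_map = {}
for idx, class_ in enumerate(classes):
    cat_id_map[class_['id']] = idx + 2
    class_['id'] = idx + 2

meta = {
    "type": "meta",
    "name": "lastAction",
    "timestamp": int(round(time.time() * 1000))
}

annotations = {}
for file_name, json_data in files_dict.items():
    name = file_name.replace('___objects.json', '')
    remapped = [dict(d, classId=cat_id_map[d['classId']]) for d in json_data if 'classId' in d]
    annotations[name] = remapped + [meta]

print(json.dumps(annotations, indent=2))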

superannotate/input_converters/converters/coco_converters/coco_strategies.py

Lines changed: 9 additions & 24 deletions
@@ -87,14 +87,9 @@ def sa_to_output_format(self):
 
     def to_sa_format(self):
         json_data = os.path.join(self.export_root, self.dataset_name + ".json")
-        self._create_sa_classes(json_data)
-        loader = self.conversion_algorithm(json_data, self.output_dir)
-        # jsons = glob.glob(
-        #     os.path.join(self.export_root, '*.json'), recursive=True
-        # )
-        # for json_data in jsons:
-        #     self._create_sa_classes(json_data)
-        #     loader = self.conversion_algorithm(json_data, self.output_dir)
+        sa_classes = self._create_sa_classes(json_data)
+        sa_jsons = self.conversion_algorithm(json_data, self.output_dir)
+        self.dump_output(sa_classes, sa_jsons)
 
 
 class CocoObjectDetectionStrategy(CoCoConverter):
@@ -201,14 +196,9 @@ def sa_to_output_format(self):
 
     def to_sa_format(self):
         json_data = os.path.join(self.export_root, self.dataset_name + ".json")
-        self._create_sa_classes(json_data)
-        loader = self.conversion_algorithm(json_data, self.output_dir)
-        # jsons = glob.glob(
-        #     os.path.join(self.export_root, '*.json'), recursive=True
-        # )
-        # for json_data in jsons:
-        #     self._create_sa_classes(json_data)
-        #     loader = self.conversion_algorithm(json_data, self.output_dir)
+        sa_classes = self._create_sa_classes(json_data)
+        sa_jsons = self.conversion_algorithm(json_data, self.output_dir)
+        self.dump_output(sa_classes, sa_jsons)
 
 
 class CocoKeypointDetectionStrategy(CoCoConverter):
@@ -276,11 +266,6 @@ def sa_to_output_format(self):
 
     def to_sa_format(self):
         json_data = os.path.join(self.export_root, self.dataset_name + ".json")
-        self._create_sa_classes(json_data)
-        loader = self.conversion_algorithm(json_data, self.output_dir)
-        # jsons = glob.glob(
-        #     os.path.join(self.export_root, '*.json'), recursive=True
-        # )
-        # for json_data in jsons:
-        #     self._create_sa_classes(json_data)
-        #     loader = self.conversion_algorithm(json_data, self.output_dir)
+        sa_classes = self._create_sa_classes(json_data)
+        sa_jsons = self.conversion_algorithm(json_data, self.output_dir)
+        self.dump_output(sa_classes, sa_jsons)
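With this change each strategy's to_sa_format no longer writes files itself: _create_sa_classes returns the classes list, the conversion algorithm returns a dict mapping output file names to annotations, and dump_output on the base CoCoConverter routes both to the Web or Desktop writer. A minimal sketch of that dispatch, using a stripped-down stand-in class rather than the real converter:

class TinyConverter:
    """Stand-in for CoCoConverter's dump_output dispatch (illustrative only)."""

    def __init__(self, platform):
        self.platform = platform

    def save_web_format(self, classes, files_dict):
        print("web:", list(files_dict), "plus classes/classes.json")

    def save_desktop_format(self, classes, files_dict):
        print("desktop: single annotations.json plus classes.json")

    def dump_output(self, classes, files_dict):
        # Same branch as the diff: 'Web' writes per-image JSONs, anything else Desktop.
        if self.platform == 'Web':
            self.save_web_format(classes, files_dict)
        else:
            self.save_desktop_format(classes, files_dict)


TinyConverter('Web').dump_output([], {'img.jpg___objects.json': []})
TinyConverter('Desktop').dump_output([], {'img.jpg___objects.json': []})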

superannotate/input_converters/converters/coco_converters/coco_to_sa_pixel.py

Lines changed: 8 additions & 7 deletions
@@ -15,6 +15,7 @@ def coco_panoptic_segmentation_to_sa_pixel(coco_path, images_path):
     hex_colors = blue_color_generator(len(coco_json["categories"]))
     annotate_list = coco_json["annotations"]
 
+    sa_jsons = {}
     for annotate in tqdm(annotate_list, "Converting"):
         annot_name = os.path.splitext(annotate["file_name"])[0]
         img_cv = cv2.imread(os.path.join(images_path, annot_name + ".png"))
@@ -49,17 +50,15 @@ def coco_panoptic_segmentation_to_sa_pixel(coco_path, images_path):
             }
             out_json.append(dd)
 
-        with open(
-            os.path.join(images_path, annot_name + ".jpg___pixel.json"), "w"
-        ) as writer:
-            json.dump(out_json, writer, indent=2)
-
         img = cv2.cvtColor(img.reshape((H, W, C)), cv2.COLOR_RGB2BGR)
         cv2.imwrite(
             os.path.join(images_path, annot_name + ".jpg___save.png"), img
         )
 
+        file_name = annot_name + ".jpg___pixel.json"
+        sa_jsons[file_name] = out_json
         os.remove(os.path.join(images_path, annot_name + ".png"))
+    return sa_jsons
 
 
 def coco_instance_segmentation_to_sa_pixel(coco_path, images_path):
@@ -116,6 +115,8 @@ def coco_instance_segmentation_to_sa_pixel(coco_path, images_path):
             value['mask']
         )
 
+    sa_jsons = {}
     for key, sa_js in sa_json.items():
-        with open(os.path.join(images_path, key + '___pixel.json'), 'w') as fw:
-            json.dump(sa_js, fw, indent=2)
+        file_name = key + '___pixel.json'
+        sa_jsons[file_name] = sa_js
+    return sa_jsons

superannotate/input_converters/converters/coco_converters/coco_to_sa_vector.py

Lines changed: 8 additions & 9 deletions
@@ -99,6 +99,7 @@ def coco_instance_segmentation_to_sa_vector(coco_path, images_path):
             image_id_to_annotations[sa_dict_bbox['imageId']
                                    ].append(sa_dict_bbox)
 
+    sa_jsons = {}
     for img in tqdm(coco_json['images'], "Writing annotations to disk"):
         if img['id'] not in image_id_to_annotations:
             continue
@@ -107,10 +108,9 @@ def coco_instance_segmentation_to_sa_vector(coco_path, images_path):
             image_path = img['file_name']
         else:
             image_path = img['coco_url'].split('/')[-1]
-        with open(
-            os.path.join(images_path, image_path + "___objects.json"), "w"
-        ) as new_json:
-            json.dump(f_loader, new_json, indent=2)
+        file_name = image_path + "___objects.json"
+        sa_jsons[file_name] = f_loader
+    return sa_jsons
 
 
 def coco_keypoint_detection_to_sa_vector(coco_path, images_path):
@@ -249,13 +249,12 @@ def coco_keypoint_detection_to_sa_vector(coco_path, images_path):
             if sa_template['imageId'] == img['id']:
                 loader.append((img['id'], sa_template))
 
+    sa_jsons = {}
     for img in coco_json['images']:
         f_loader = []
         for img_id, img_data in loader:
             if img['id'] == img_id:
                 f_loader.append(img_data)
-        with open(
-            os.path.join(images_path, img['file_name'] + "___objects.json"),
-            "w"
-        ) as new_json:
-            json.dump(f_loader, new_json, indent=2)
+        file_name = img['file_name'] + "___objects.json"
+        sa_jsons[file_name] = f_loader
+    return sa_jsons
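In both coco_to_sa_pixel.py and coco_to_sa_vector.py the converters now accumulate their results in an sa_jsons dict keyed by the SuperAnnotate file name ("<image>___pixel.json" or "<image>___objects.json") and return it, instead of dumping each JSON inside the loop. A toy sketch of that accumulation pattern; the image names and annotation dicts are invented, no real COCO data is read:

# Toy stand-in for the per-image loop in coco_instance_segmentation_to_sa_vector.
images = [{'file_name': 'cat.jpg'}, {'file_name': 'dog.jpg'}]
annotations_by_image = {'cat.jpg': [{'type': 'bbox'}], 'dog.jpg': []}

sa_jsons = {}
for img in images:
    f_loader = annotations_by_image.get(img['file_name'], [])
    file_name = img['file_name'] + "___objects.json"
    sa_jsons[file_name] = f_loader

# The caller (dump_output) decides whether these become per-image files (Web)
# or one merged annotations.json (Desktop).
print(sa_jsons)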
