Commit 2888a2f

Merge pull request #560 from superannotateai/1708_implimentation
1708 implimentation
2 parents 9402ba2 + d6573e1 commit 2888a2f

15 files changed: 16 additions & 465 deletions

docs/source/api_reference/helpers.rst

Lines changed: 1 addition & 11 deletions
@@ -15,8 +15,6 @@ _________________________________________________________________
 .. autofunction:: superannotate.convert_project_type
 .. autofunction:: superannotate.convert_json_version
 
-
-
 ----------
 
 Working with annotations
@@ -28,15 +26,7 @@ ________________________
 
 ----------
 
-Aggregating class distribution from annotations
-_____________________________________________________________
-
-.. autofunction:: superannotate.class_distribution
-
-
-----------
-
 Utility functions
 --------------------------------
 
-.. autofunction:: superannotate.SAClient.consensus
-.. autofunction:: superannotate.SAClient.benchmark
+.. autofunction:: superannotate.SAClient.consensus
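
With class_distribution and SAClient.benchmark gone, SAClient.consensus is the only scoring helper left on this page. A minimal usage sketch, with a hypothetical project and folder names; the keyword arguments are assumed to mirror the removed benchmark signature shown later in this diff:

    from superannotate import SAClient

    sa = SAClient()  # picks up credentials from the default SDK config

    # "Example Project" and the folder names are hypothetical.
    # annot_type is one of "bbox", "polygon", "point" per the removed docstring.
    scores = sa.consensus(
        "Example Project",
        folder_names=["annotator_1", "annotator_2"],
        annot_type="bbox",
        show_plots=False,
    )
    print(scores.head())  # pandas DataFrame of per-instance agreement scores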

docs/source/userguide/quickstart.rst

Lines changed: 2 additions & 2 deletions
@@ -23,8 +23,8 @@ It can be installed on Ubuntu with:
 
    sudo apt-get install ffmpeg
 
-For Windows and Mac OS based installations to use :py:obj:`benchmark` and :py:obj:`consensus`
-functions you might also need to install beforehand :py:obj:`shapely` package,
+For Windows and Mac OS based installations to use :py:obj:`consensus`
+function you might also need to install beforehand :py:obj:`shapely` package,
 which we found to work properly only under Anaconda distribution, with:
 
 .. code-block:: bash

docs/source/userguide/utilities.rst

Lines changed: 0 additions & 7 deletions
@@ -188,10 +188,3 @@ The points are colored according to class name. Each annotator is represented wi
 
 .. image:: images/consensus_scatter.png
 
-
-Computing benchmark scores for instances between ground truth project and given project list
-============
-
-
-Benchmark is a tool to compare the quallity of the annotations of the same image that is present in several projects with
-the ground truth annotation of the same image that is in a separate project.

requirements_extra.txt

Lines changed: 12 additions & 11 deletions
@@ -1,13 +1,14 @@
-Sphinx==3.5.4
-Jinja2==3.0.3
-tox==3.24.2
-pytest==6.2.4
-pytest-xdist==2.3.0
-pytest-parallel==0.1.0
-pytest-rerunfailures==10.2
-sphinx_rtd_theme==1.0.0
-sphinx_inline_tabs==2022.1.2b11
+Sphinx==6.1.3
+Jinja2==3.1.2
+tox==4.4.5
+sphinx_rtd_theme==1.2.0
+furo==2022.12.7
 jaraco.tidelift==1.5.0
 sphinx-notfound-page==0.8.3
-furo==2022.12.7
-pytest-cov
+sphinx_inline_tabs==2022.1.2b11
+
+pytest==7.2.1
+pytest-xdist==3.2.0
+pytest-parallel==0.1.1
+pytest-cov==4.0.0
+pytest-rerunfailures==10.2

src/superannotate/__init__.py

Lines changed: 0 additions & 3 deletions
@@ -12,7 +12,6 @@
 from packaging.version import parse # noqa
 from superannotate.lib.app.input_converters import convert_json_version # noqa
 from superannotate.lib.app.input_converters import convert_project_type # noqa
-from superannotate.lib.app.analytics.class_analytics import class_distribution # noqa
 from superannotate.lib.app.exceptions import AppException # noqa
 from superannotate.lib.app.input_converters import convert_json_version # noqa
 from superannotate.lib.app.input_converters import convert_project_type # noqa
@@ -43,8 +42,6 @@ def create_app(apps: typing.List[str] = None) -> SAServer:
     # Utils
     "enums",
     "AppException",
-    # analytics
-    "class_distribution",
     # converters
     "convert_json_version",
     "import_annotation",

src/superannotate/lib/app/analytics/class_analytics.py

Lines changed: 0 additions & 73 deletions
This file was deleted.
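
The deleted module provided superannotate.class_distribution, the "Aggregating class distribution from annotations" helper removed from helpers.rst above. Its source is not shown in this diff, so as a purely illustrative stand-in (toy data, not the SDK's API), here is a pandas sketch of aggregating per-class instance counts:

    import pandas as pd

    # Toy instance-level frame; the real helper walked exported annotation JSONs.
    instances = pd.DataFrame(
        {
            "className": ["car", "car", "person"],
            "itemName": ["a.jpg", "b.jpg", "a.jpg"],
        }
    )

    # Count annotation instances per class, largest classes first.
    distribution = (
        instances.groupby("className")
        .size()
        .reset_index(name="count")
        .sort_values("count", ascending=False)
    )
    print(distribution)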

src/superannotate/lib/app/analytics/common.py

Lines changed: 1 addition & 1 deletion
@@ -449,7 +449,7 @@ def consensus(df, item_name, annot_type):
         from shapely.geometry import Point, Polygon, box
     except ImportError:
         raise ImportError(
-            "To use superannotate.benchmark or superannotate.consensus functions please install shapely package."
+            "To use superannotate.consensus function please install shapely package."
         )
 
     image_df = df[df["itemName"] == item_name]
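
The hunk only trims benchmark from the error message; the lazy-import guard itself is unchanged. As a standalone sketch of that pattern (the helper name is invented, the logic is as the diff shows):

    def _require_shapely():
        # shapely is an optional dependency: only consensus() needs it,
        # so import it lazily and raise an actionable error if it is absent.
        try:
            from shapely.geometry import Point, Polygon, box
        except ImportError:
            raise ImportError(
                "To use superannotate.consensus function please install shapely package."
            )
        return Point, Polygon, box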

src/superannotate/lib/app/interface/sdk_interface.py

Lines changed: 0 additions & 69 deletions
@@ -4,7 +4,6 @@
 import json
 import os
 import sys
-import tempfile
 import warnings
 from pathlib import Path
 from typing import Callable
@@ -1794,74 +1793,6 @@ def download_model(self, model: MLModel, output_dir: Union[str, Path]):
         else:
             return BaseSerializer(res.data).serialize()
 
-    def benchmark(
-        self,
-        project: Union[NotEmptyStr, dict],
-        gt_folder: str,
-        folder_names: List[NotEmptyStr],
-        export_root: Optional[Union[str, Path]] = None,
-        image_list=None,
-        annot_type: Optional[ANNOTATION_TYPE] = "bbox",
-        show_plots=False,
-    ):
-        """Computes benchmark score for each instance of given images that are present both gt_project_name project
-        and projects in folder_names list:
-
-        :param project: project name or metadata of the project
-        :type project: str or dict
-        :param gt_folder: project folder name that contains the ground truth annotations
-        :type gt_folder: str
-        :param folder_names: list of folder names in the project for which the scores will be computed
-        :type folder_names: list of str
-        :param export_root: root export path of the projects
-        :type export_root: Path-like (str or Path)
-        :param image_list: List of image names from the projects list that must be used. If None,
-         then all images from the projects list will be used. Default: None
-        :type image_list: list
-        :param annot_type: Type of annotation instances to consider.
-         Available candidates are: ["bbox", "polygon", "point"]
-        :type annot_type: str
-        :param show_plots: If True, show plots based on results of consensus computation. Default: False
-        :type show_plots: bool
-
-        :return: Pandas DateFrame with columns
-            (creatorEmail, QA, imageName, instanceId, className, area, attribute, folderName, score)
-        :rtype: pandas DataFrame
-        """
-        project_name = project
-        if isinstance(project, dict):
-            project_name = project["name"]
-
-        project = self.controller.projects.get_by_name(project_name).data
-        if project.type not in constants.ProjectType.images:
-            raise AppException(LIMITED_FUNCTIONS[project.type])
-
-        if not export_root:
-            with tempfile.TemporaryDirectory() as temp_dir:
-                response = self.controller.benchmark(
-                    project_name=project_name,
-                    ground_truth_folder_name=gt_folder,
-                    folder_names=folder_names,
-                    export_root=temp_dir,
-                    image_list=image_list,
-                    annot_type=annot_type,
-                    show_plots=show_plots,
-                )
-
-        else:
-            response = self.controller.benchmark(
-                project_name=project_name,
-                ground_truth_folder_name=gt_folder,
-                folder_names=folder_names,
-                export_root=export_root,
-                image_list=image_list,
-                annot_type=annot_type,
-                show_plots=show_plots,
-            )
-        if response.errors:
-            raise AppException(response.errors)
-        return response.data
-
     def consensus(
         self,
         project: NotEmptyStr,
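
One detail of the removed method worth salvaging is its export_root handling: when the caller supplies no export directory, the export is scoped to a temporary one that is cleaned up automatically. A self-contained sketch of the pattern, assuming nothing beyond what the diff shows:

    import tempfile

    def run_with_export_dir(compute, export_root=None):
        # Mirrors the removed benchmark(): fall back to a throwaway
        # directory when the caller does not supply export_root.
        if export_root is None:
            with tempfile.TemporaryDirectory() as temp_dir:
                return compute(temp_dir)
        return compute(export_root)

Note that there is no direct replacement for folder-vs-ground-truth scoring after this commit; consensus (kept below) covers inter-folder agreement only.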

src/superannotate/lib/core/usecases/models.py

Lines changed: 0 additions & 95 deletions
@@ -15,9 +15,7 @@
 import requests
 from botocore.exceptions import ClientError
 from lib.app.analytics.aggregators import DataAggregator
-from lib.app.analytics.common import aggregate_image_annotations_as_df
 from lib.app.analytics.common import consensus
-from lib.app.analytics.common import consensus_plot
 from lib.core.conditions import Condition
 from lib.core.conditions import CONDITION_EQ as EQ
 from lib.core.entities import FolderEntity
@@ -320,99 +318,6 @@ def execute(self):
         return self._response
 
 
-class BenchmarkUseCase(BaseUseCase):
-    def __init__(
-        self,
-        project: ProjectEntity,
-        ground_truth_folder_name: str,
-        folder_names: list,
-        export_dir: str,
-        image_list: list,
-        annotation_type: str,
-        show_plots: bool,
-    ):
-        super().__init__()
-        self._project = project
-        self._ground_truth_folder_name = ground_truth_folder_name
-        self._folder_names = folder_names
-        self._export_dir = export_dir
-        self._image_list = image_list
-        self._annotation_type = annotation_type
-        self._show_plots = show_plots
-
-    def execute(self):
-        project_df = aggregate_image_annotations_as_df(self._export_dir)
-        gt_project_df = project_df[
-            project_df["folderName"] == self._ground_truth_folder_name
-        ]
-        benchmark_dfs = []
-        for folder_name in self._folder_names:
-            folder_df = project_df[project_df["folderName"] == folder_name]
-            project_gt_df = pd.concat([folder_df, gt_project_df])
-            project_gt_df = project_gt_df[project_gt_df["instanceId"].notna()]
-
-            if self._image_list is not None:
-                project_gt_df = project_gt_df.loc[
-                    project_gt_df["itemName"].isin(self._image_list)
-                ]
-
-            project_gt_df.query("type == '" + self._annotation_type + "'", inplace=True)
-
-            project_gt_df = project_gt_df.groupby(
-                ["itemName", "instanceId", "folderName"]
-            )
-
-            def aggregate_attributes(instance_df):
-                def attribute_to_list(attribute_df):
-                    attribute_names = list(attribute_df["attributeName"])
-                    attribute_df["attributeNames"] = len(attribute_df) * [
-                        attribute_names
-                    ]
-                    return attribute_df
-
-                attributes = None
-                if not instance_df["attributeGroupName"].isna().all():
-                    attrib_group_name = instance_df.groupby("attributeGroupName")[
-                        ["attributeGroupName", "attributeName"]
-                    ].apply(attribute_to_list)
-                    attributes = dict(
-                        zip(
-                            attrib_group_name["attributeGroupName"],
-                            attrib_group_name["attributeNames"],
-                        )
-                    )
-
-                instance_df.drop(
-                    ["attributeGroupName", "attributeName"], axis=1, inplace=True
-                )
-                instance_df.drop_duplicates(
-                    subset=["itemName", "instanceId", "folderName"], inplace=True
-                )
-                instance_df["attributes"] = [attributes]
-                return instance_df
-
-            project_gt_df = project_gt_df.apply(aggregate_attributes).reset_index(
-                drop=True
-            )
-            unique_images = set(project_gt_df["itemName"])
-            all_benchmark_data = []
-            for image_name in unique_images:
-                image_data = image_consensus(
-                    project_gt_df, image_name, self._annotation_type
-                )
-                all_benchmark_data.append(pd.DataFrame(image_data))
-            benchmark_project_df = pd.concat(all_benchmark_data, ignore_index=True)
-            benchmark_project_df = benchmark_project_df[
-                benchmark_project_df["folderName"] == folder_name
-            ]
-            benchmark_dfs.append(benchmark_project_df)
-        benchmark_df = pd.concat(benchmark_dfs, ignore_index=True)
-        if self._show_plots:
-            consensus_plot(benchmark_df, self._folder_names)
-        self._response.data = benchmark_df
-        return self._response
-
-
 class ConsensusUseCase(BaseUseCase):
     def __init__(
         self,
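
The densest logic in this commit sits in the deleted execute(): group annotation rows per (itemName, instanceId, folderName), fold attribute rows into a dict, then score each image with image_consensus against the ground-truth folder. A condensed, self-contained sketch of the attribute-folding step, with toy data and simplified pandas calls (illustrative, not the SDK's API):

    import pandas as pd

    # Toy rows shaped like the aggregated annotation export the use case consumed.
    df = pd.DataFrame(
        {
            "itemName": ["img1.jpg", "img1.jpg", "img1.jpg"],
            "instanceId": [0, 0, 1],
            "folderName": ["annotator_1", "annotator_1", "annotator_1"],
            "attributeGroupName": ["color", "color", None],
            "attributeName": ["red", "blue", None],
        }
    )

    def aggregate_attributes(instance_df):
        # Fold attribute rows into {group: [names]} and collapse each
        # instance to a single row, as the removed helper did.
        attributes = None
        if not instance_df["attributeGroupName"].isna().all():
            attributes = (
                instance_df.dropna(subset=["attributeGroupName"])
                .groupby("attributeGroupName")["attributeName"]
                .apply(list)
                .to_dict()
            )
        out = instance_df.drop(columns=["attributeGroupName", "attributeName"])
        out = out.drop_duplicates(subset=["itemName", "instanceId", "folderName"])
        out["attributes"] = [attributes] * len(out)
        return out

    collapsed = (
        df.groupby(["itemName", "instanceId", "folderName"], group_keys=False)
        .apply(aggregate_attributes)
        .reset_index(drop=True)
    )
    print(collapsed)  # one row per instance, attributes folded into a dict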
